// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt) "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI    (GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539   (1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
    void __iomem        *redist_base;
    phys_addr_t     phys_base;
    bool            single_redist;
};

struct gic_chip_data {
    struct fwnode_handle    *fwnode;
    void __iomem        *dist_base;
    struct redist_region    *redist_regions;
    struct rdists       rdists;
    struct irq_domain   *domain;
    u64         redist_stride;
    u32         nr_redist_regions;
    u64         flags;
    bool            has_rss;
    unsigned int        ppi_nr;
    struct partition_desc   **ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR   (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation, thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
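
/*
 * Illustrative example (derived from the formula above, not from the spec
 * text itself): a non-secure priority of 0xa0 in GIC_(R)DIST_PRI[irq] is
 * presented to the CPU interface as (0xa0 >> 1) | 0x80 == 0xd0 when
 * security is enabled.
 */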
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts requires to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)                  \
    ({                              \
        u32 __priority = (priority);                \
        if (static_branch_unlikely(&gic_nonsecure_priorities))  \
            __priority = 0x80 | (__priority >> 1);      \
                                    \
        __priority;                     \
    })

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)         (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()        (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()    (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()   (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE   0xf0

enum gic_intid_range {
    SGI_RANGE,
    PPI_RANGE,
    SPI_RANGE,
    EPPI_RANGE,
    ESPI_RANGE,
    LPI_RANGE,
    __INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
    switch (hwirq) {
    case 0 ... 15:
        return SGI_RANGE;
    case 16 ... 31:
        return PPI_RANGE;
    case 32 ... 1019:
        return SPI_RANGE;
    case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
        return EPPI_RANGE;
    case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
        return ESPI_RANGE;
    case 8192 ... GENMASK(23, 0):
        return LPI_RANGE;
    default:
        return __INVALID_RANGE__;
    }
}
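
/* e.g. INTID 27 falls in PPI_RANGE, INTID 4096 (ESPI_BASE_INTID) in ESPI_RANGE, INTID 8192 in LPI_RANGE */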

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
    return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
    return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
    switch (get_intid_range(d)) {
    case SGI_RANGE:
    case PPI_RANGE:
    case EPPI_RANGE:
        return true;
    default:
        return false;
    }
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
    switch (get_intid_range(d)) {
    case SGI_RANGE:
    case PPI_RANGE:
    case EPPI_RANGE:
        /* SGI+PPI -> SGI_base for this CPU */
        return gic_data_rdist_sgi_base();

    case SPI_RANGE:
    case ESPI_RANGE:
        /* SPI -> dist_base */
        return gic_data.dist_base;

    default:
        return NULL;
    }
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
    u32 count = 1000000;    /* 1s! */

    while (readl_relaxed(base + GICD_CTLR) & bit) {
        count--;
        if (!count) {
            pr_err_ratelimited("RWP timeout, gone fishing\n");
            return;
        }
        cpu_relax();
        udelay(1);
    }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
    gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
    gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
    if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
        return gic_read_iar_cavium_thunderx();
    else
        return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
    void __iomem *rbase;
    u32 count = 1000000;    /* 1s! */
    u32 val;

    if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
        return;

    rbase = gic_data_rdist_rd_base();

    val = readl_relaxed(rbase + GICR_WAKER);
    if (enable)
        /* Wake up this CPU redistributor */
        val &= ~GICR_WAKER_ProcessorSleep;
    else
        val |= GICR_WAKER_ProcessorSleep;
    writel_relaxed(val, rbase + GICR_WAKER);

    if (!enable) {      /* Check that GICR_WAKER is writeable */
        val = readl_relaxed(rbase + GICR_WAKER);
        if (!(val & GICR_WAKER_ProcessorSleep))
            return; /* No PM support in this redistributor */
    }

    while (--count) {
        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
            break;
        cpu_relax();
        udelay(1);
    }
    if (!count)
        pr_err_ratelimited("redistributor failed to %s...\n",
                   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
    switch (get_intid_range(d)) {
    case SGI_RANGE:
    case PPI_RANGE:
    case SPI_RANGE:
        *index = d->hwirq;
        return offset;
    case EPPI_RANGE:
        /*
         * Contrary to the ESPI range, the EPPI range is contiguous
         * to the PPI range in the registers, so let's adjust the
         * displacement accordingly. Consistency is overrated.
         */
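        /* e.g. the first EPPI (INTID 1056 == EPPI_BASE_INTID) maps to index 32, right after PPI 31 */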
        *index = d->hwirq - EPPI_BASE_INTID + 32;
        return offset;
    case ESPI_RANGE:
        *index = d->hwirq - ESPI_BASE_INTID;
        switch (offset) {
        case GICD_ISENABLER:
            return GICD_ISENABLERnE;
        case GICD_ICENABLER:
            return GICD_ICENABLERnE;
        case GICD_ISPENDR:
            return GICD_ISPENDRnE;
        case GICD_ICPENDR:
            return GICD_ICPENDRnE;
        case GICD_ISACTIVER:
            return GICD_ISACTIVERnE;
        case GICD_ICACTIVER:
            return GICD_ICACTIVERnE;
        case GICD_IPRIORITYR:
            return GICD_IPRIORITYRnE;
        case GICD_ICFGR:
            return GICD_ICFGRnE;
        case GICD_IROUTER:
            return GICD_IROUTERnE;
        default:
            break;
        }
        break;
    default:
        break;
    }

    WARN_ON(1);
    *index = d->hwirq;
    return offset;
}
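
/*
 * Example (illustrative): for the first ESPI (INTID 4096 == ESPI_BASE_INTID),
 * convert_offset_index() returns GICD_ISENABLERnE with index 0, so the
 * peek/poke helpers below address bit 0 of the extended-range register bank.
 */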

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
    void __iomem *base;
    u32 index, mask;

    offset = convert_offset_index(d, offset, &index);
    mask = 1 << (index % 32);

    if (gic_irq_in_rdist(d))
        base = gic_data_rdist_sgi_base();
    else
        base = gic_data.dist_base;

    return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
    void __iomem *base;
    u32 index, mask;

    offset = convert_offset_index(d, offset, &index);
    mask = 1 << (index % 32);

    if (gic_irq_in_rdist(d))
        base = gic_data_rdist_sgi_base();
    else
        base = gic_data.dist_base;

    writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
    gic_poke_irq(d, GICD_ICENABLER);
    if (gic_irq_in_rdist(d))
        gic_redist_wait_for_rwp();
    else
        gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
    gic_mask_irq(d);
    /*
     * When masking a forwarded interrupt, make sure it is
     * deactivated as well.
     *
     * This ensures that an interrupt that is getting
     * disabled/masked will not get "stuck", because there is
     * no one to deactivate it (the guest is being terminated).
     */
    if (irqd_is_forwarded_to_vcpu(d))
        gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
    gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
    return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
           static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                     enum irqchip_irq_state which, bool val)
{
    u32 reg;

    if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
        return -EINVAL;

    switch (which) {
    case IRQCHIP_STATE_PENDING:
        reg = val ? GICD_ISPENDR : GICD_ICPENDR;
        break;

    case IRQCHIP_STATE_ACTIVE:
        reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
        break;

    case IRQCHIP_STATE_MASKED:
        if (val) {
            gic_mask_irq(d);
            return 0;
        }
        reg = GICD_ISENABLER;
        break;

    default:
        return -EINVAL;
    }

    gic_poke_irq(d, reg);
    return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                     enum irqchip_irq_state which, bool *val)
{
    if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
        return -EINVAL;

    switch (which) {
    case IRQCHIP_STATE_PENDING:
        *val = gic_peek_irq(d, GICD_ISPENDR);
        break;

    case IRQCHIP_STATE_ACTIVE:
        *val = gic_peek_irq(d, GICD_ISACTIVER);
        break;

    case IRQCHIP_STATE_MASKED:
        *val = !gic_peek_irq(d, GICD_ISENABLER);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
    void __iomem *base = gic_dist_base(d);
    u32 offset, index;

    offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

    writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
    switch (__get_intid_range(hwirq)) {
    case PPI_RANGE:
        return hwirq - 16;
    case EPPI_RANGE:
        return hwirq - EPPI_BASE_INTID + 16;
    default:
        unreachable();
    }
}
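
/*
 * Example (illustrative): PPI INTID 27 yields index 11, while the first
 * EPPI (INTID 1056) yields index 16, so regular and extended PPIs share a
 * single contiguous index space (used by ppi_nmi_refs, among others).
 */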

static u32 gic_get_ppi_index(struct irq_data *d)
{
    return __gic_get_ppi_index(d->hwirq);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
    struct irq_desc *desc = irq_to_desc(d->irq);

    if (!gic_supports_nmi())
        return -EINVAL;

    if (gic_peek_irq(d, GICD_ISENABLER)) {
        pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
        return -EINVAL;
    }

    /*
     * A secondary irq_chip should be in charge of LPIs;
     * it should not be possible to get here.
     */
    if (WARN_ON(gic_irq(d) >= 8192))
        return -EINVAL;

    /* desc lock should already be held */
    if (gic_irq_in_rdist(d)) {
        u32 idx = gic_get_ppi_index(d);

        /* Setting up PPI as NMI, only switch handler for first NMI */
        if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
            refcount_set(&ppi_nmi_refs[idx], 1);
            desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
        }
    } else {
        desc->handle_irq = handle_fasteoi_nmi;
    }

    gic_irq_set_prio(d, GICD_INT_NMI_PRI);

    return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
    struct irq_desc *desc = irq_to_desc(d->irq);

    if (WARN_ON(!gic_supports_nmi()))
        return;

    if (gic_peek_irq(d, GICD_ISENABLER)) {
        pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
        return;
    }

    /*
     * A secondary irq_chip should be in charge of LPIs;
     * it should not be possible to get here.
     */
    if (WARN_ON(gic_irq(d) >= 8192))
        return;

    /* desc lock should already be held */
    if (gic_irq_in_rdist(d)) {
        u32 idx = gic_get_ppi_index(d);

        /* Tearing down NMI, only switch handler for last NMI */
        if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
            desc->handle_irq = handle_percpu_devid_irq;
    } else {
        desc->handle_irq = handle_fasteoi_irq;
    }

    gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
    write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
    isb();
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
    /*
     * No need to deactivate an LPI, or an interrupt that
     * is getting forwarded to a vcpu.
     */
    if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
        return;
    gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
    enum gic_intid_range range;
    unsigned int irq = gic_irq(d);
    void __iomem *base;
    u32 offset, index;
    int ret;

    range = get_intid_range(d);

    /* Interrupt configuration for SGIs can't be changed */
    if (range == SGI_RANGE)
        return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

    /* SPIs have restrictions on the supported types */
    if ((range == SPI_RANGE || range == ESPI_RANGE) &&
        type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
        return -EINVAL;

    if (gic_irq_in_rdist(d))
        base = gic_data_rdist_sgi_base();
    else
        base = gic_data.dist_base;

    offset = convert_offset_index(d, GICD_ICFGR, &index);

    ret = gic_configure_irq(index, type, base + offset, NULL);
    if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
        /* Misconfigured PPIs are usually not fatal */
        pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
        ret = 0;
    }

    return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
    if (get_intid_range(d) == SGI_RANGE)
        return -EINVAL;

    if (vcpu)
        irqd_set_forwarded_to_vcpu(d);
    else
        irqd_clr_forwarded_to_vcpu(d);
    return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
    u64 aff;

    aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
           MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
           MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
           MPIDR_AFFINITY_LEVEL(mpidr, 0));

    return aff;
}

static void gic_deactivate_unhandled(u32 irqnr)
{
    if (static_branch_likely(&supports_deactivate_key)) {
        if (irqnr < 8192)
            gic_write_dir(irqnr);
    } else {
        write_gicreg(irqnr, ICC_EOIR1_EL1);
        isb();
    }
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
    if (static_branch_likely(&supports_deactivate_key))
        write_gicreg(irqnr, ICC_EOIR1_EL1);

    isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
    if (!gic_supports_nmi())
        return false;

    return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}

static bool gic_irqnr_is_special(u32 irqnr)
{
    return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
    if (gic_irqnr_is_special(irqnr))
        return;

    gic_complete_ack(irqnr);

    if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
        WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
        gic_deactivate_unhandled(irqnr);
    }
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
    if (gic_irqnr_is_special(irqnr))
        return;

    gic_complete_ack(irqnr);

    if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
        WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
        gic_deactivate_unhandled(irqnr);
    }
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
    bool is_nmi;
    u32 irqnr;

    irqnr = gic_read_iar();

    is_nmi = gic_rpr_is_nmi_prio();

    if (is_nmi) {
        nmi_enter();
        __gic_handle_nmi(irqnr, regs);
        nmi_exit();
    }

    if (gic_prio_masking_enabled()) {
        gic_pmr_mask_irqs();
        gic_arch_enable_irqs();
    }

    if (!is_nmi)
        __gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
    u64 pmr;
    u32 irqnr;

    /*
     * We were in a context with IRQs disabled. However, the
     * entry code has set PMR to a value that allows any
     * interrupt to be acknowledged, and not just NMIs. This can
     * lead to surprising effects if the NMI has been retired in
     * the meantime and an IRQ is pending. The IRQ would then be
     * taken in NMI context, something that nobody wants to debug
     * twice.
     *
     * Until we sort this, drop PMR again to a level that will
     * actually only allow NMIs before reading IAR, and then
     * restore it to what it was.
     */
    pmr = gic_read_pmr();
    gic_pmr_mask_irqs();
    isb();
    irqnr = gic_read_iar();
    gic_write_pmr(pmr);

    __gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
    if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
        __gic_handle_irq_from_irqsoff(regs);
    else
        __gic_handle_irq_from_irqson(regs);
}

static u32 gic_get_pribits(void)
{
    u32 pribits;

    pribits = gic_read_ctlr();
    pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
    pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
    pribits++;

    return pribits;
}

static bool gic_has_group0(void)
{
    u32 val;
    u32 old_pmr;

    old_pmr = gic_read_pmr();

    /*
     * Let's find out if Group0 is under control of EL3 or not by
     * setting the highest possible, non-zero priority in PMR.
     *
     * If SCR_EL3.FIQ is set, the priority gets shifted down in
     * order for the CPU interface to set bit 7, and keep the
     * actual priority in the non-secure range. In the process, it
     * loses the least significant bit and the actual priority
     * becomes 0x80. Reading it back returns 0, indicating that
     * we don't have access to Group0.
     */
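    /*
     * Worked example (illustrative): with pribits == 5, the write below
     * is BIT(3) == 0x08; under SCR_EL3.FIQ == 1 it is shifted into the
     * non-secure range, degenerates to 0x80 as described above, and
     * reads back as 0 rather than 0x08.
     */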
    gic_write_pmr(BIT(8 - gic_get_pribits()));
    val = gic_read_pmr();

    gic_write_pmr(old_pmr);

    return val != 0;
}

static void __init gic_dist_init(void)
{
    unsigned int i;
    u64 affinity;
    void __iomem *base = gic_data.dist_base;
    u32 val;

    /* Disable the distributor */
    writel_relaxed(0, base + GICD_CTLR);
    gic_dist_wait_for_rwp();

    /*
     * Configure SPIs as non-secure Group-1. This will only matter
     * if the GIC only has a single security state. This will not
     * do the right thing if the kernel is running in secure mode,
     * but that's not the intended use case anyway.
     */
    for (i = 32; i < GIC_LINE_NR; i += 32)
        writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

    /* Extended SPI range, not handled by the GICv2/GICv3 common code */
    for (i = 0; i < GIC_ESPI_NR; i += 32) {
        writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
        writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
    }

    for (i = 0; i < GIC_ESPI_NR; i += 32)
        writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

    for (i = 0; i < GIC_ESPI_NR; i += 16)
        writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

    for (i = 0; i < GIC_ESPI_NR; i += 4)
        writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

    /* Now do the common stuff */
    gic_dist_config(base, GIC_LINE_NR, NULL);

    val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
    if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
        pr_info("Enabling SGIs without active state\n");
        val |= GICD_CTLR_nASSGIreq;
    }

    /* Enable distributor with ARE, Group1, and wait for it to drain */
    writel_relaxed(val, base + GICD_CTLR);
    gic_dist_wait_for_rwp();

    /*
     * Set all global interrupts to the boot CPU only. ARE must be
     * enabled.
     */
    affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
    for (i = 32; i < GIC_LINE_NR; i++)
        gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

    for (i = 0; i < GIC_ESPI_NR; i++)
        gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
    int ret = -ENODEV;
    int i;

    for (i = 0; i < gic_data.nr_redist_regions; i++) {
        void __iomem *ptr = gic_data.redist_regions[i].redist_base;
        u64 typer;
        u32 reg;

        reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (reg != GIC_PIDR2_ARCH_GICv3 &&
            reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
            pr_warn("No redistributor present @%p\n", ptr);
            break;
        }

        do {
            typer = gic_read_typer(ptr + GICR_TYPER);
            ret = fn(gic_data.redist_regions + i, ptr);
            if (!ret)
                return 0;

            if (gic_data.redist_regions[i].single_redist)
                break;

            if (gic_data.redist_stride) {
                ptr += gic_data.redist_stride;
            } else {
                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                if (typer & GICR_TYPER_VLPIS)
                    ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
            }
        } while (!(typer & GICR_TYPER_LAST));
    }

    return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
    unsigned long mpidr = cpu_logical_map(smp_processor_id());
    u64 typer;
    u32 aff;

    /*
     * Convert affinity to a 32bit value that can be matched to
     * GICR_TYPER bits [63:32].
     */
    aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
           MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
           MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
           MPIDR_AFFINITY_LEVEL(mpidr, 0));

    typer = gic_read_typer(ptr + GICR_TYPER);
    if ((typer >> 32) == aff) {
        u64 offset = ptr - region->redist_base;
        raw_spin_lock_init(&gic_data_rdist()->rd_lock);
        gic_data_rdist_rd_base() = ptr;
        gic_data_rdist()->phys_base = region->phys_base + offset;

        pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
            smp_processor_id(), mpidr,
            (int)(region - gic_data.redist_regions),
            &gic_data_rdist()->phys_base);
        return 0;
    }

    /* Try next one */
    return 1;
}

static int gic_populate_rdist(void)
{
    if (gic_iterate_rdists(__gic_populate_rdist) == 0)
        return 0;

    /* We couldn't even deal with ourselves... */
    WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
         smp_processor_id(),
         (unsigned long)cpu_logical_map(smp_processor_id()));
    return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
                     void __iomem *ptr)
{
    u64 typer = gic_read_typer(ptr + GICR_TYPER);
    u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

    /* Boot-time cleanup */
    if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
        u64 val;

        /* Deactivate any present vPE */
        val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
        if (val & GICR_VPENDBASER_Valid)
            gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
                          ptr + SZ_128K + GICR_VPENDBASER);

        /* Mark the VPE table as invalid */
        val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
        val &= ~GICR_VPROPBASER_4_1_VALID;
        gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
    }

    gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

    /*
     * TYPER.RVPEID implies some form of DirectLPI, no matter what the
     * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
     * that the ITS driver can make use of for LPIs (and not VLPIs).
     *
     * These are 3 different ways to express the same thing, depending
     * on the revision of the architecture and its relaxations over
     * time. Just group them under the 'direct_lpi' banner.
     */
    gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
    gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
                       !!(ctlr & GICR_CTLR_IR) |
                       gic_data.rdists.has_rvpeid);
    gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

    /* Detect non-sensical configurations */
    if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
        gic_data.rdists.has_direct_lpi = false;
        gic_data.rdists.has_vlpis = false;
        gic_data.rdists.has_rvpeid = false;
    }

    gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

    return 1;
}

static void gic_update_rdist_properties(void)
{
    gic_data.ppi_nr = UINT_MAX;
    gic_iterate_rdists(__gic_update_rdist_properties);
    if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
        gic_data.ppi_nr = 0;
    pr_info("GICv3 features: %d PPIs%s%s\n",
        gic_data.ppi_nr,
        gic_data.has_rss ? ", RSS" : "",
        gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

    if (gic_data.rdists.has_vlpis)
        pr_info("GICv4 features: %s%s%s\n",
            gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
            gic_data.rdists.has_rvpeid ? "RVPEID " : "",
            gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
    return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
    int i, cpu = smp_processor_id();
    u64 mpidr = cpu_logical_map(cpu);
    u64 need_rss = MPIDR_RS(mpidr);
    bool group0;
    u32 pribits;

    /*
     * Need to check that the SRE bit has actually been set. If
     * not, it means that SRE is disabled at EL2. We're going to
     * die painfully, and there is nothing we can do about it.
     *
     * Kindly inform the luser.
     */
    if (!gic_enable_sre())
        pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

    pribits = gic_get_pribits();

    group0 = gic_has_group0();

    /* Set priority mask register */
    if (!gic_prio_masking_enabled()) {
        write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
    } else if (gic_supports_nmi()) {
        /*
         * Mismatched configuration with the boot CPU; the system
         * is likely to die as interrupt masking will not work
         * properly on all CPUs.
         *
         * The boot CPU calls this function before enabling NMI support,
         * and as a result we'll never see this warning in the boot path
         * for that CPU.
         */
        if (static_branch_unlikely(&gic_nonsecure_priorities))
            WARN_ON(!group0 || gic_dist_security_disabled());
        else
            WARN_ON(group0 && !gic_dist_security_disabled());
    }

    /*
     * Some firmwares hand over to the kernel with the BPR changed from
     * its reset value (and with a value large enough to prevent
     * any pre-emptive interrupts from working at all). Writing a zero
     * to BPR restores its reset value.
     */
    gic_write_bpr1(0);

    if (static_branch_likely(&supports_deactivate_key)) {
        /* EOI drops priority only (mode 1) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
    } else {
        /* EOI deactivates interrupt too (mode 0) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
    }

    /* Always whack Group0 before Group1 */
    if (group0) {
        switch (pribits) {
        case 8:
        case 7:
            write_gicreg(0, ICC_AP0R3_EL1);
            write_gicreg(0, ICC_AP0R2_EL1);
            fallthrough;
        case 6:
            write_gicreg(0, ICC_AP0R1_EL1);
            fallthrough;
        case 5:
        case 4:
            write_gicreg(0, ICC_AP0R0_EL1);
        }

        isb();
    }

    switch (pribits) {
    case 8:
    case 7:
        write_gicreg(0, ICC_AP1R3_EL1);
        write_gicreg(0, ICC_AP1R2_EL1);
        fallthrough;
    case 6:
        write_gicreg(0, ICC_AP1R1_EL1);
        fallthrough;
    case 5:
    case 4:
        write_gicreg(0, ICC_AP1R0_EL1);
    }

    isb();

    /* ... and let's hit the road... */
    gic_write_grpen1(1);

    /* Keep the RSS capability status in per_cpu variable */
    per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

    /* Check that all CPUs are capable of sending SGIs to the other CPUs */
    for_each_online_cpu(i) {
        bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

        need_rss |= MPIDR_RS(cpu_logical_map(i));
        if (need_rss && (!have_rss))
            pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                cpu, (unsigned long)mpidr,
                i, (unsigned long)cpu_logical_map(i));
    }

    /*
     * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
     * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
     * UNPREDICTABLE choice of:
     *   - The write is ignored.
     *   - The RS field is treated as 0.
     */
    if (need_rss && (!gic_data.has_rss))
        pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
    return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
    return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
        !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
        !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
    void __iomem *rbase;
    int i;

    /* Register ourselves with the rest of the world */
    if (gic_populate_rdist())
        return;

    gic_enable_redist(true);

    WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
         !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
         "Distributor has extended ranges, but CPU%d doesn't\n",
         smp_processor_id());

    rbase = gic_data_rdist_sgi_base();

    /* Configure SGIs/PPIs as non-secure Group-1 */
    for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
        writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

    gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

    /* initialise system registers */
    gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
    gic_cpu_init();

    if (gic_dist_supports_lpis())
        its_cpu_init();

    return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                   unsigned long cluster_id)
{
    int next_cpu, cpu = *base_cpu;
    unsigned long mpidr = cpu_logical_map(cpu);
    u16 tlist = 0;

    while (cpu < nr_cpu_ids) {
        tlist |= 1 << (mpidr & 0xf);

        next_cpu = cpumask_next(cpu, mask);
        if (next_cpu >= nr_cpu_ids)
            goto out;
        cpu = next_cpu;

        mpidr = cpu_logical_map(cpu);

        if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
            cpu--;
            goto out;
        }
    }
out:
    *base_cpu = cpu;
    return tlist;
}
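
/*
 * Example (illustrative): four mask CPUs in one cluster with Aff0 values
 * 0-3 produce tlist == 0xf, so the whole cluster is targeted with a single
 * ICC_SGI1R_EL1 write; a CPU from another cluster terminates the list.
 */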

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
    (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
        << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
    u64 val;

    val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
           MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
           irq << ICC_SGI1R_SGI_ID_SHIFT        |
           MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
           MPIDR_TO_SGI_RS(cluster_id)      |
           tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

    pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
    gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
    int cpu;

    if (WARN_ON(d->hwirq >= 16))
        return;

    /*
     * Ensure that stores to Normal memory are visible to the
     * other CPUs before issuing the IPI.
     */
    dsb(ishst);

    for_each_cpu(cpu, mask) {
        u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
        u16 tlist;

        tlist = gic_compute_target_list(&cpu, mask, cluster_id);
        gic_send_sgi(cluster_id, tlist, d->hwirq);
    }

    /* Force the above writes to ICC_SGI1R_EL1 to be executed */
    isb();
}

static void __init gic_smp_init(void)
{
    struct irq_fwspec sgi_fwspec = {
        .fwnode     = gic_data.fwnode,
        .param_count    = 1,
    };
    int base_sgi;

    cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                  "irqchip/arm/gicv3:starting",
                  gic_starting_cpu, NULL);

    /* Register all 8 non-secure SGIs */
    base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
                       NUMA_NO_NODE, &sgi_fwspec,
                       false, NULL);
    if (WARN_ON(base_sgi <= 0))
        return;

    set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                bool force)
{
    unsigned int cpu;
    u32 offset, index;
    void __iomem *reg;
    int enabled;
    u64 val;

    if (force)
        cpu = cpumask_first(mask_val);
    else
        cpu = cpumask_any_and(mask_val, cpu_online_mask);

    if (cpu >= nr_cpu_ids)
        return -EINVAL;

    if (gic_irq_in_rdist(d))
        return -EINVAL;

    /* If interrupt was enabled, disable it first */
    enabled = gic_peek_irq(d, GICD_ISENABLER);
    if (enabled)
        gic_mask_irq(d);

    offset = convert_offset_index(d, GICD_IROUTER, &index);
    reg = gic_dist_base(d) + offset + (index * 8);
    val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

    gic_write_irouter(val, reg);

    /*
     * If the interrupt was enabled, enable it again. Otherwise,
     * just wait for the distributor to have digested our changes.
     */
    if (enabled)
        gic_unmask_irq(d);

    irq_data_update_effective_affinity(d, cpumask_of(cpu));

    return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity    NULL
#define gic_ipi_send_mask   NULL
#define gic_smp_init()      do { } while (0)
#endif

static int gic_retrigger(struct irq_data *data)
{
    return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
                   unsigned long cmd, void *v)
{
    if (cmd == CPU_PM_EXIT) {
        if (gic_dist_security_disabled())
            gic_enable_redist(true);
        gic_cpu_sys_reg_init();
    } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
        gic_write_grpen1(0);
        gic_enable_redist(false);
    }
    return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
    .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
    cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
    .name           = "GICv3",
    .irq_mask       = gic_mask_irq,
    .irq_unmask     = gic_unmask_irq,
    .irq_eoi        = gic_eoi_irq,
    .irq_set_type       = gic_set_type,
    .irq_set_affinity   = gic_set_affinity,
    .irq_retrigger          = gic_retrigger,
    .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
    .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
    .irq_nmi_setup      = gic_irq_nmi_setup,
    .irq_nmi_teardown   = gic_irq_nmi_teardown,
    .ipi_send_mask      = gic_ipi_send_mask,
    .flags          = IRQCHIP_SET_TYPE_MASKED |
                  IRQCHIP_SKIP_SET_WAKE |
                  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
    .name           = "GICv3",
    .irq_mask       = gic_eoimode1_mask_irq,
    .irq_unmask     = gic_unmask_irq,
    .irq_eoi        = gic_eoimode1_eoi_irq,
    .irq_set_type       = gic_set_type,
    .irq_set_affinity   = gic_set_affinity,
    .irq_retrigger          = gic_retrigger,
    .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
    .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
    .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
    .irq_nmi_setup      = gic_irq_nmi_setup,
    .irq_nmi_teardown   = gic_irq_nmi_teardown,
    .ipi_send_mask      = gic_ipi_send_mask,
    .flags          = IRQCHIP_SET_TYPE_MASKED |
                  IRQCHIP_SKIP_SET_WAKE |
                  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                  irq_hw_number_t hw)
{
    struct irq_chip *chip = &gic_chip;
    struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

    if (static_branch_likely(&supports_deactivate_key))
        chip = &gic_eoimode1_chip;

    switch (__get_intid_range(hw)) {
    case SGI_RANGE:
    case PPI_RANGE:
    case EPPI_RANGE:
        irq_set_percpu_devid(irq);
        irq_domain_set_info(d, irq, hw, chip, d->host_data,
                    handle_percpu_devid_irq, NULL, NULL);
        break;

    case SPI_RANGE:
    case ESPI_RANGE:
        irq_domain_set_info(d, irq, hw, chip, d->host_data,
                    handle_fasteoi_irq, NULL, NULL);
        irq_set_probe(irq);
        irqd_set_single_target(irqd);
        break;

    case LPI_RANGE:
        if (!gic_dist_supports_lpis())
            return -EPERM;
        irq_domain_set_info(d, irq, hw, chip, d->host_data,
                    handle_fasteoi_irq, NULL, NULL);
        break;

    default:
        return -EPERM;
    }

    /* Prevents SW retriggers which mess up the ACK/EOI ordering */
    irqd_set_handle_enforce_irqctx(irqd);
    return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
                    struct irq_fwspec *fwspec,
                    unsigned long *hwirq,
                    unsigned int *type)
{
    if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
        *hwirq = fwspec->param[0];
        *type = IRQ_TYPE_EDGE_RISING;
        return 0;
    }

    if (is_of_node(fwspec->fwnode)) {
        if (fwspec->param_count < 3)
            return -EINVAL;

        switch (fwspec->param[0]) {
        case 0:         /* SPI */
            *hwirq = fwspec->param[1] + 32;
            break;
        case 1:         /* PPI */
            *hwirq = fwspec->param[1] + 16;
            break;
        case 2:         /* ESPI */
            *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
            break;
        case 3:         /* EPPI */
            *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
            break;
        case GIC_IRQ_TYPE_LPI:  /* LPI */
            *hwirq = fwspec->param[1];
            break;
        case GIC_IRQ_TYPE_PARTITION:
            *hwirq = fwspec->param[1];
            if (fwspec->param[1] >= 16)
                *hwirq += EPPI_BASE_INTID - 16;
            else
                *hwirq += 16;
            break;
        default:
            return -EINVAL;
        }

        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

        /*
         * Make it clear that broken DTs are... broken.
         * Partitioned PPIs are an unfortunate exception.
         */
        WARN_ON(*type == IRQ_TYPE_NONE &&
            fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
        return 0;
    }

    if (is_fwnode_irqchip(fwspec->fwnode)) {
        if (fwspec->param_count != 2)
            return -EINVAL;

        if (fwspec->param[0] < 16) {
            pr_err(FW_BUG "Illegal GSI%d translation request\n",
                   fwspec->param[0]);
            return -EINVAL;
        }

        *hwirq = fwspec->param[0];
        *type = fwspec->param[1];

        WARN_ON(*type == IRQ_TYPE_NONE);
        return 0;
    }

    return -EINVAL;
}
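
/*
 * Example (illustrative): a DT entry "interrupts = <GIC_SPI 23
 * IRQ_TYPE_LEVEL_HIGH>", i.e. <0 23 4>, translates to hwirq 55 (23 + 32)
 * with a level-high trigger.
 */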
1553 
1554 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1555                 unsigned int nr_irqs, void *arg)
1556 {
1557     int i, ret;
1558     irq_hw_number_t hwirq;
1559     unsigned int type = IRQ_TYPE_NONE;
1560     struct irq_fwspec *fwspec = arg;
1561 
1562     ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1563     if (ret)
1564         return ret;
1565 
1566     for (i = 0; i < nr_irqs; i++) {
1567         ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1568         if (ret)
1569             return ret;
1570     }
1571 
1572     return 0;
1573 }
1574 
1575 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1576                 unsigned int nr_irqs)
1577 {
1578     int i;
1579 
1580     for (i = 0; i < nr_irqs; i++) {
1581         struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1582         irq_set_handler(virq + i, NULL);
1583         irq_domain_reset_irq_data(d);
1584     }
1585 }
1586 
1587 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1588                       irq_hw_number_t hwirq)
1589 {
1590     enum gic_intid_range range;
1591 
1592     if (!gic_data.ppi_descs)
1593         return false;
1594 
1595     if (!is_of_node(fwspec->fwnode))
1596         return false;
1597 
1598     if (fwspec->param_count < 4 || !fwspec->param[3])
1599         return false;
1600 
1601     range = __get_intid_range(hwirq);
1602     if (range != PPI_RANGE && range != EPPI_RANGE)
1603         return false;
1604 
1605     return true;
1606 }
1607 
1608 static int gic_irq_domain_select(struct irq_domain *d,
1609                  struct irq_fwspec *fwspec,
1610                  enum irq_domain_bus_token bus_token)
1611 {
1612     unsigned int type, ret, ppi_idx;
1613     irq_hw_number_t hwirq;
1614 
1615     /* Not for us */
1616         if (fwspec->fwnode != d->fwnode)
1617         return 0;
1618 
1619     /* If this is not DT, then we have a single domain */
1620     if (!is_of_node(fwspec->fwnode))
1621         return 1;
1622 
1623     ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1624     if (WARN_ON_ONCE(ret))
1625         return 0;
1626 
1627     if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1628         return d == gic_data.domain;
1629 
1630     /*
1631      * If this is a PPI and we have a 4th (non-null) parameter,
1632      * then we need to match the partition domain.
1633      */
1634     ppi_idx = __gic_get_ppi_index(hwirq);
1635     return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1636 }
1637 
1638 static const struct irq_domain_ops gic_irq_domain_ops = {
1639     .translate = gic_irq_domain_translate,
1640     .alloc = gic_irq_domain_alloc,
1641     .free = gic_irq_domain_free,
1642     .select = gic_irq_domain_select,
1643 };
1644 
1645 static int partition_domain_translate(struct irq_domain *d,
1646                       struct irq_fwspec *fwspec,
1647                       unsigned long *hwirq,
1648                       unsigned int *type)
1649 {
1650     unsigned long ppi_intid;
1651     struct device_node *np;
1652     unsigned int ppi_idx;
1653     int ret;
1654 
1655     if (!gic_data.ppi_descs)
1656         return -ENOMEM;
1657 
1658     np = of_find_node_by_phandle(fwspec->param[3]);
1659     if (WARN_ON(!np))
1660         return -EINVAL;
1661 
1662     ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1663     if (WARN_ON_ONCE(ret))
1664         return 0;
1665 
1666     ppi_idx = __gic_get_ppi_index(ppi_intid);
1667     ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
1668                      of_node_to_fwnode(np));
1669     if (ret < 0)
1670         return ret;
1671 
1672     *hwirq = ret;
1673     *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1674 
1675     return 0;
1676 }
1677 
1678 static const struct irq_domain_ops partition_domain_ops = {
1679     .translate = partition_domain_translate,
1680     .select = gic_irq_domain_select,
1681 };
1682 
1683 static bool gic_enable_quirk_msm8996(void *data)
1684 {
1685     struct gic_chip_data *d = data;
1686 
1687     d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1688 
1689     return true;
1690 }
1691 
1692 static bool gic_enable_quirk_cavium_38539(void *data)
1693 {
1694     struct gic_chip_data *d = data;
1695 
1696     d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1697 
1698     return true;
1699 }
1700 
1701 static bool gic_enable_quirk_hip06_07(void *data)
1702 {
1703     struct gic_chip_data *d = data;
1704 
1705     /*
1706      * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1707      * not being an actual ARM implementation). The saving grace is
1708      * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1709      * HIP07 doesn't even have a proper IIDR, and still pretends to
1710      * have ESPI. In both cases, put them right.
1711      */
1712     if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1713         /* Zero both ESPI and the RES0 field next to it... */
1714         d->rdists.gicd_typer &= ~GENMASK(9, 8);
1715         return true;
1716     }
1717 
1718     return false;
1719 }
1720 
1721 static const struct gic_quirk gic_quirks[] = {
1722     {
1723         .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
1724         .compatible = "qcom,msm8996-gic-v3",
1725         .init   = gic_enable_quirk_msm8996,
1726     },
1727     {
1728         .desc   = "GICv3: HIP06 erratum 161010803",
1729         .iidr   = 0x0204043b,
1730         .mask   = 0xffffffff,
1731         .init   = gic_enable_quirk_hip06_07,
1732     },
1733     {
1734         .desc   = "GICv3: HIP07 erratum 161010803",
1735         .iidr   = 0x00000000,
1736         .mask   = 0xffffffff,
1737         .init   = gic_enable_quirk_hip06_07,
1738     },
1739     {
1740         /*
1741          * Reserved register accesses generate a Synchronous
1742          * External Abort. This erratum applies to:
1743          * - ThunderX: CN88xx
1744          * - OCTEON TX: CN83xx, CN81xx
1745          * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1746          */
1747         .desc   = "GICv3: Cavium erratum 38539",
1748         .iidr   = 0xa000034c,
1749         .mask   = 0xe8f00fff,
1750         .init   = gic_enable_quirk_cavium_38539,
1751     },
1752     {
1753     }
1754 };
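
/*
 * A rough sketch of how the IIDR-based entries above get matched: a quirk
 * applies when the GICD_IIDR value, masked by .mask, equals .iidr (the
 * MSM8996 entry is matched by DT compatible string instead). This mirrors
 * the logic of gic_enable_quirks() in irq-gic-common.c, simplified here
 * for illustration.
 */
static bool __maybe_unused gic_quirk_matches_iidr(const struct gic_quirk *quirk,
                          u32 iidr)
{
    if (quirk->compatible)
        return false;

    return (iidr & quirk->mask) == quirk->iidr;
}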
1755 
1756 static void gic_enable_nmi_support(void)
1757 {
1758     int i;
1759 
1760     if (!gic_prio_masking_enabled())
1761         return;
1762 
1763     ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1764     if (!ppi_nmi_refs)
1765         return;
1766 
1767     for (i = 0; i < gic_data.ppi_nr; i++)
1768         refcount_set(&ppi_nmi_refs[i], 0);
1769 
1770     /*
1771      * Linux itself doesn't use 1:N distribution, so has no need to
1772      * set PMHE. The only reason to have it set is if EL3 requires it
1773      * (and we can't change it).
1774      */
1775     if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
1776         static_branch_enable(&gic_pmr_sync);
1777 
1778     pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1779         static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
1780 
1781     /*
1782      * How priority values are used by the GIC depends on two things:
1783      * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
1784      * and if Group 0 interrupts can be delivered to Linux in the non-secure
1785      * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
1786      * ICC_PMR_EL1 register and the priority that software assigns to
1787      * interrupts:
1788      *
1789      * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1790      * -----------------------------------------------------------
1791      *      1       |      -      |  unchanged  |    unchanged
1792      * -----------------------------------------------------------
1793      *      0       |      1      |  non-secure |    non-secure
1794      * -----------------------------------------------------------
1795      *      0       |      0      |  unchanged  |    non-secure
1796      *
1797      * where non-secure means that the value is right-shifted by one and the
1798      * MSB set, to make it fit in the non-secure priority range.
1799      *
1800      * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1801      * are both either modified or unchanged, we can use the same set of
1802      * priorities.
1803      *
1804      * In the last case, where only the interrupt priorities are modified to
1805      * be in the non-secure range, we use a different PMR value to mask IRQs
1806      * and the rest of the values that we use remain unchanged.
1807      */
1808     if (gic_has_group0() && !gic_dist_security_disabled())
1809         static_branch_enable(&gic_nonsecure_priorities);
1810 
1811     static_branch_enable(&supports_pseudo_nmis);
1812 
1813     if (static_branch_likely(&supports_deactivate_key))
1814         gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1815     else
1816         gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1817 }
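
/*
 * A worked example of the "non-secure" transformation described in the
 * table above: a priority written to the (re)distributor is presented to
 * the CPU interface right-shifted by one with the MSB set. Illustrative
 * helper only, not used by the driver.
 */
static u8 __maybe_unused gic_prio_as_nonsecure(u8 prio)
{
    /* e.g. 0xa0 becomes (0xa0 >> 1) | 0x80 == 0xd0 */
    return (prio >> 1) | 0x80;
}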
1818 
1819 static int __init gic_init_bases(void __iomem *dist_base,
1820                  struct redist_region *rdist_regs,
1821                  u32 nr_redist_regions,
1822                  u64 redist_stride,
1823                  struct fwnode_handle *handle)
1824 {
1825     u32 typer;
1826     int err;
1827 
1828     if (!is_hyp_mode_available())
1829         static_branch_disable(&supports_deactivate_key);
1830 
1831     if (static_branch_likely(&supports_deactivate_key))
1832         pr_info("GIC: Using split EOI/Deactivate mode\n");
1833 
1834     gic_data.fwnode = handle;
1835     gic_data.dist_base = dist_base;
1836     gic_data.redist_regions = rdist_regs;
1837     gic_data.nr_redist_regions = nr_redist_regions;
1838     gic_data.redist_stride = redist_stride;
1839 
1840     /*
1841      * Find out how many interrupts are supported.
1842      */
1843     typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1844     gic_data.rdists.gicd_typer = typer;
1845 
1846     gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1847               gic_quirks, &gic_data);
1848 
1849     pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1850     pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
1851 
1852     /*
1853      * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
1854      * architecture spec (which says that reserved registers are RES0).
1855      */
1856     if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
1857         gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
1858 
1859     gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
1860                          &gic_data);
1861     gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1862     gic_data.rdists.has_rvpeid = true;
1863     gic_data.rdists.has_vlpis = true;
1864     gic_data.rdists.has_direct_lpi = true;
1865     gic_data.rdists.has_vpend_valid_dirty = true;
1866 
1867     if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
1868         err = -ENOMEM;
1869         goto out_free;
1870     }
1871 
1872     irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1873 
1874     gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1875 
1876     if (typer & GICD_TYPER_MBIS) {
1877         err = mbi_init(handle, gic_data.domain);
1878         if (err)
1879             pr_err("Failed to initialize MBIs\n");
1880     }
1881 
1882     set_handle_irq(gic_handle_irq);
1883 
1884     gic_update_rdist_properties();
1885 
1886     gic_dist_init();
1887     gic_cpu_init();
1888     gic_smp_init();
1889     gic_cpu_pm_init();
1890 
1891     if (gic_dist_supports_lpis()) {
1892         its_init(handle, &gic_data.rdists, gic_data.domain);
1893         its_cpu_init();
1894         its_lpi_memreserve_init();
1895     } else {
1896         if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1897             gicv2m_init(handle, gic_data.domain);
1898     }
1899 
1900     gic_enable_nmi_support();
1901 
1902     return 0;
1903 
1904 out_free:
1905     if (gic_data.domain)
1906         irq_domain_remove(gic_data.domain);
1907     free_percpu(gic_data.rdists.rdist);
1908     return err;
1909 }
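
/*
 * A sketch of how the SPI count logged by gic_init_bases() falls out of
 * GICD_TYPER: ITLinesNumber (bits [4:0]) encodes up to 32 * (N + 1)
 * interrupt IDs, capped at 1020, and the first 32 IDs are SGIs/PPIs.
 * The driver does this through GIC_LINE_NR; the open-coded form below is
 * for illustration only.
 */
static u32 __maybe_unused gic_typer_to_nr_spis(u32 typer)
{
    u32 lines = min(32 * ((typer & 0x1f) + 1), 1020U);

    return lines - 32;
}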
1910 
1911 static int __init gic_validate_dist_version(void __iomem *dist_base)
1912 {
1913     u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1914 
1915     if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1916         return -ENODEV;
1917 
1918     return 0;
1919 }
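
/*
 * For reference: GIC_PIDR2_ARCH_MASK isolates the ArchRev field of
 * GICD_PIDR2 (bits [7:4]), which identifies the architecture revision.
 * Only GICv3 and GICv4 values let the probe proceed; anything else
 * (e.g. a GICv2 living at this address) makes the check above fail
 * with -ENODEV.
 */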
1920 
1921 /* Create all possible partitions at boot time */
1922 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1923 {
1924     struct device_node *parts_node, *child_part;
1925     int part_idx = 0, i;
1926     int nr_parts;
1927     struct partition_affinity *parts;
1928 
1929     parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1930     if (!parts_node)
1931         return;
1932 
1933     gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1934     if (!gic_data.ppi_descs)
1935         goto out_put_node;
1936 
1937     nr_parts = of_get_child_count(parts_node);
1938 
1939     if (!nr_parts)
1940         goto out_put_node;
1941 
1942     parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1943     if (WARN_ON(!parts))
1944         goto out_put_node;
1945 
1946     for_each_child_of_node(parts_node, child_part) {
1947         struct partition_affinity *part;
1948         int n;
1949 
1950         part = &parts[part_idx];
1951 
1952         part->partition_id = of_node_to_fwnode(child_part);
1953 
1954         pr_info("GIC: PPI partition %pOFn[%d] { ",
1955             child_part, part_idx);
1956 
1957         n = of_property_count_elems_of_size(child_part, "affinity",
1958                             sizeof(u32));
1959         WARN_ON(n <= 0);
1960 
1961         for (i = 0; i < n; i++) {
1962             int err, cpu;
1963             u32 cpu_phandle;
1964             struct device_node *cpu_node;
1965 
1966             err = of_property_read_u32_index(child_part, "affinity",
1967                              i, &cpu_phandle);
1968             if (WARN_ON(err))
1969                 continue;
1970 
1971             cpu_node = of_find_node_by_phandle(cpu_phandle);
1972             if (WARN_ON(!cpu_node))
1973                 continue;
1974 
1975             cpu = of_cpu_node_to_id(cpu_node);
1976             if (WARN_ON(cpu < 0)) {
1977                 of_node_put(cpu_node);
1978                 continue;
1979             }
1980 
1981             pr_cont("%pOF[%d] ", cpu_node, cpu);
1982 
1983             cpumask_set_cpu(cpu, &part->mask);
1984             of_node_put(cpu_node);
1985         }
1986 
1987         pr_cont("}\n");
1988         part_idx++;
1989     }
1990 
1991     for (i = 0; i < gic_data.ppi_nr; i++) {
1992         unsigned int irq;
1993         struct partition_desc *desc;
1994         struct irq_fwspec ppi_fwspec = {
1995             .fwnode     = gic_data.fwnode,
1996             .param_count    = 3,
1997             .param      = {
1998                 [0] = GIC_IRQ_TYPE_PARTITION,
1999                 [1] = i,
2000                 [2] = IRQ_TYPE_NONE,
2001             },
2002         };
2003 
2004         irq = irq_create_fwspec_mapping(&ppi_fwspec);
2005         if (WARN_ON(!irq))
2006             continue;
2007         desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
2008                          irq, &partition_domain_ops);
2009         if (WARN_ON(!desc))
2010             continue;
2011 
2012         gic_data.ppi_descs[i] = desc;
2013     }
2014 
2015 out_put_node:
2016     of_node_put(parts_node);
2017 }
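
/*
 * The layout consumed above, sketched as a hypothetical DT fragment
 * (node labels and CPU phandles are examples only):
 *
 *    ppi-partitions {
 *        part0: interrupt-partition-0 {
 *            affinity = <&cpu0 &cpu2>;
 *        };
 *        part1: interrupt-partition-1 {
 *            affinity = <&cpu1 &cpu3>;
 *        };
 *    };
 *
 * A consumer would then pick a partition with a fourth interrupt cell,
 * e.g. interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH &part0>;
 */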
2018 
2019 static void __init gic_of_setup_kvm_info(struct device_node *node)
2020 {
2021     int ret;
2022     struct resource r;
2023     u32 gicv_idx;
2024 
2025     gic_v3_kvm_info.type = GIC_V3;
2026 
2027     gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
2028     if (!gic_v3_kvm_info.maint_irq)
2029         return;
2030 
2031     if (of_property_read_u32(node, "#redistributor-regions",
2032                  &gicv_idx))
2033         gicv_idx = 1;
2034 
2035     gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
2036     ret = of_address_to_resource(node, gicv_idx, &r);
2037     if (!ret)
2038         gic_v3_kvm_info.vcpu = r;
2039 
2040     gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2041     gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2042     vgic_set_kvm_info(&gic_v3_kvm_info);
2043 }
2044 
2045 static void gic_request_region(resource_size_t base, resource_size_t size,
2046                    const char *name)
2047 {
2048     if (!request_mem_region(base, size, name))
2049         pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
2050                  name, &base);
2051 }
2052 
2053 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2054                   const char *name, struct resource *res)
2055 {
2056     void __iomem *base;
2057     int ret;
2058 
2059     ret = of_address_to_resource(node, idx, res);
2060     if (ret)
2061         return IOMEM_ERR_PTR(ret);
2062 
2063     gic_request_region(res->start, resource_size(res), name);
2064     base = of_iomap(node, idx);
2065 
2066     return base ?: IOMEM_ERR_PTR(-ENOMEM);
2067 }
2068 
2069 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2070 {
2071     void __iomem *dist_base;
2072     struct redist_region *rdist_regs;
2073     struct resource res;
2074     u64 redist_stride;
2075     u32 nr_redist_regions;
2076     int err, i;
2077 
2078     dist_base = gic_of_iomap(node, 0, "GICD", &res);
2079     if (IS_ERR(dist_base)) {
2080         pr_err("%pOF: unable to map gic dist registers\n", node);
2081         return PTR_ERR(dist_base);
2082     }
2083 
2084     err = gic_validate_dist_version(dist_base);
2085     if (err) {
2086         pr_err("%pOF: no distributor detected, giving up\n", node);
2087         goto out_unmap_dist;
2088     }
2089 
2090     if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2091         nr_redist_regions = 1;
2092 
2093     rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
2094                  GFP_KERNEL);
2095     if (!rdist_regs) {
2096         err = -ENOMEM;
2097         goto out_unmap_dist;
2098     }
2099 
2100     for (i = 0; i < nr_redist_regions; i++) {
2101         rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2102         if (IS_ERR(rdist_regs[i].redist_base)) {
2103             pr_err("%pOF: couldn't map region %d\n", node, i);
2104             err = -ENODEV;
2105             goto out_unmap_rdist;
2106         }
2107         rdist_regs[i].phys_base = res.start;
2108     }
2109 
2110     if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
2111         redist_stride = 0;
2112 
2113     gic_enable_of_quirks(node, gic_quirks, &gic_data);
2114 
2115     err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
2116                  redist_stride, &node->fwnode);
2117     if (err)
2118         goto out_unmap_rdist;
2119 
2120     gic_populate_ppi_partitions(node);
2121 
2122     if (static_branch_likely(&supports_deactivate_key))
2123         gic_of_setup_kvm_info(node);
2124     return 0;
2125 
2126 out_unmap_rdist:
2127     for (i = 0; i < nr_redist_regions; i++)
2128         if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
2129             iounmap(rdist_regs[i].redist_base);
2130     kfree(rdist_regs);
2131 out_unmap_dist:
2132     iounmap(dist_base);
2133     return err;
2134 }
2135 
2136 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
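
/*
 * The properties gic_of_init() consumes, shown as a hypothetical DT node
 * (all addresses and sizes below are examples, not requirements):
 *
 *    interrupt-controller@2c010000 {
 *        compatible = "arm,gic-v3";
 *        #interrupt-cells = <3>;
 *        interrupt-controller;
 *        #redistributor-regions = <1>;
 *        redistributor-stride = <0x0 0x40000>;
 *        reg = <0x2c010000 0x10000>,    // GICD, reg index 0
 *              <0x2d000000 0x800000>;   // GICR region(s), index 1..N
 *    };
 *
 * Both #redistributor-regions and redistributor-stride are optional and
 * default to 1 and 0 respectively, as the code above shows.
 */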
2137 
2138 #ifdef CONFIG_ACPI
2139 static struct
2140 {
2141     void __iomem *dist_base;
2142     struct redist_region *redist_regs;
2143     u32 nr_redist_regions;
2144     bool single_redist;
2145     int enabled_rdists;
2146     u32 maint_irq;
2147     int maint_irq_mode;
2148     phys_addr_t vcpu_base;
2149 } acpi_data __initdata;
2150 
2151 static void __init
2152 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2153 {
2154     static int count = 0;
2155 
2156     acpi_data.redist_regs[count].phys_base = phys_base;
2157     acpi_data.redist_regs[count].redist_base = redist_base;
2158     acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
2159     count++;
2160 }
2161 
2162 static int __init
2163 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2164                const unsigned long end)
2165 {
2166     struct acpi_madt_generic_redistributor *redist =
2167             (struct acpi_madt_generic_redistributor *)header;
2168     void __iomem *redist_base;
2169 
2170     redist_base = ioremap(redist->base_address, redist->length);
2171     if (!redist_base) {
2172         pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2173         return -ENOMEM;
2174     }
2175     gic_request_region(redist->base_address, redist->length, "GICR");
2176 
2177     gic_acpi_register_redist(redist->base_address, redist_base);
2178     return 0;
2179 }
2180 
2181 static int __init
2182 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2183              const unsigned long end)
2184 {
2185     struct acpi_madt_generic_interrupt *gicc =
2186                 (struct acpi_madt_generic_interrupt *)header;
2187     u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2188     u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2189     void __iomem *redist_base;
2190 
2191     /* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
2192     if (!(gicc->flags & ACPI_MADT_ENABLED))
2193         return 0;
2194 
2195     redist_base = ioremap(gicc->gicr_base_address, size);
2196     if (!redist_base)
2197         return -ENOMEM;
2198     gic_request_region(gicc->gicr_base_address, size, "GICR");
2199 
2200     gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2201     return 0;
2202 }
2203 
2204 static int __init gic_acpi_collect_gicr_base(void)
2205 {
2206     acpi_tbl_entry_handler redist_parser;
2207     enum acpi_madt_type type;
2208 
2209     if (acpi_data.single_redist) {
2210         type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2211         redist_parser = gic_acpi_parse_madt_gicc;
2212     } else {
2213         type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2214         redist_parser = gic_acpi_parse_madt_redist;
2215     }
2216 
2217     /* Collect redistributor base addresses in GICR entries */
2218     if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2219         return 0;
2220 
2221     pr_info("No valid GICR entries exist\n");
2222     return -ENODEV;
2223 }
2224 
2225 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2226                   const unsigned long end)
2227 {
2228     /* Subtable presence means that a redistributor exists; that's it */
2229     return 0;
2230 }
2231 
2232 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2233                       const unsigned long end)
2234 {
2235     struct acpi_madt_generic_interrupt *gicc =
2236                 (struct acpi_madt_generic_interrupt *)header;
2237 
2238     /*
2239      * If a GICC entry is enabled and has a valid GICR base address, the
2240      * GICR base is presented via GICC.
2241      */
2242     if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
2243         acpi_data.enabled_rdists++;
2244         return 0;
2245     }
2246 
2247     /*
2248      * It's perfectly valid for firmware to pass a disabled GICC entry;
2249      * don't treat it as an error, just skip it instead of failing the probe.
2250      */
2251     if (!(gicc->flags & ACPI_MADT_ENABLED))
2252         return 0;
2253 
2254     return -ENODEV;
2255 }
2256 
2257 static int __init gic_acpi_count_gicr_regions(void)
2258 {
2259     int count;
2260 
2261     /*
2262      * Count how many redistributor regions we have. Mixing redistributor
2263      * descriptions is not allowed: GICR and GICC subtables have to be
2264      * mutually exclusive.
2265      */
2266     count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2267                       gic_acpi_match_gicr, 0);
2268     if (count > 0) {
2269         acpi_data.single_redist = false;
2270         return count;
2271     }
2272 
2273     count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2274                       gic_acpi_match_gicc, 0);
2275     if (count > 0) {
2276         acpi_data.single_redist = true;
2277         count = acpi_data.enabled_rdists;
2278     }
2279 
2280     return count;
2281 }
2282 
2283 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2284                        struct acpi_probe_entry *ape)
2285 {
2286     struct acpi_madt_generic_distributor *dist;
2287     int count;
2288 
2289     dist = (struct acpi_madt_generic_distributor *)header;
2290     if (dist->version != ape->driver_data)
2291         return false;
2292 
2293     /* We need to do that exercise anyway; the sooner the better */
2294     count = gic_acpi_count_gicr_regions();
2295     if (count <= 0)
2296         return false;
2297 
2298     acpi_data.nr_redist_regions = count;
2299     return true;
2300 }
2301 
2302 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2303                         const unsigned long end)
2304 {
2305     struct acpi_madt_generic_interrupt *gicc =
2306         (struct acpi_madt_generic_interrupt *)header;
2307     int maint_irq_mode;
2308     static bool first_madt = true;
2309 
2310     /* Skip unusable CPUs */
2311     if (!(gicc->flags & ACPI_MADT_ENABLED))
2312         return 0;
2313 
2314     maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2315         ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2316 
2317     if (first_madt) {
2318         first_madt = false;
2319 
2320         acpi_data.maint_irq = gicc->vgic_interrupt;
2321         acpi_data.maint_irq_mode = maint_irq_mode;
2322         acpi_data.vcpu_base = gicc->gicv_base_address;
2323 
2324         return 0;
2325     }
2326 
2327     /*
2328      * The maintenance interrupt and GICV should be the same for every CPU
2329      */
2330     if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2331         (acpi_data.maint_irq_mode != maint_irq_mode) ||
2332         (acpi_data.vcpu_base != gicc->gicv_base_address))
2333         return -EINVAL;
2334 
2335     return 0;
2336 }
2337 
2338 static bool __init gic_acpi_collect_virt_info(void)
2339 {
2340     int count;
2341 
2342     count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2343                       gic_acpi_parse_virt_madt_gicc, 0);
2344 
2345     return (count > 0);
2346 }
2347 
2348 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2349 #define ACPI_GICV2_VCTRL_MEM_SIZE   (SZ_4K)
2350 #define ACPI_GICV2_VCPU_MEM_SIZE    (SZ_8K)
2351 
2352 static void __init gic_acpi_setup_kvm_info(void)
2353 {
2354     int irq;
2355 
2356     if (!gic_acpi_collect_virt_info()) {
2357         pr_warn("Unable to get hardware information used for virtualization\n");
2358         return;
2359     }
2360 
2361     gic_v3_kvm_info.type = GIC_V3;
2362 
2363     irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2364                 acpi_data.maint_irq_mode,
2365                 ACPI_ACTIVE_HIGH);
2366     if (irq <= 0)
2367         return;
2368 
2369     gic_v3_kvm_info.maint_irq = irq;
2370 
2371     if (acpi_data.vcpu_base) {
2372         struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2373 
2374         vcpu->flags = IORESOURCE_MEM;
2375         vcpu->start = acpi_data.vcpu_base;
2376         vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2377     }
2378 
2379     gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2380     gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2381     vgic_set_kvm_info(&gic_v3_kvm_info);
2382 }
2383 
2384 static struct fwnode_handle *gsi_domain_handle;
2385 
2386 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2387 {
2388     return gsi_domain_handle;
2389 }
2390 
2391 static int __init
2392 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2393 {
2394     struct acpi_madt_generic_distributor *dist;
2395     size_t size;
2396     int i, err;
2397 
2398     /* Get distributor base address */
2399     dist = (struct acpi_madt_generic_distributor *)header;
2400     acpi_data.dist_base = ioremap(dist->base_address,
2401                       ACPI_GICV3_DIST_MEM_SIZE);
2402     if (!acpi_data.dist_base) {
2403         pr_err("Unable to map GICD registers\n");
2404         return -ENOMEM;
2405     }
2406     gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
2407 
2408     err = gic_validate_dist_version(acpi_data.dist_base);
2409     if (err) {
2410         pr_err("No distributor detected at @%p, giving up\n",
2411                acpi_data.dist_base);
2412         goto out_dist_unmap;
2413     }
2414 
2415     size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2416     acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2417     if (!acpi_data.redist_regs) {
2418         err = -ENOMEM;
2419         goto out_dist_unmap;
2420     }
2421 
2422     err = gic_acpi_collect_gicr_base();
2423     if (err)
2424         goto out_redist_unmap;
2425 
2426     gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2427     if (!gsi_domain_handle) {
2428         err = -ENOMEM;
2429         goto out_redist_unmap;
2430     }
2431 
2432     err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
2433                  acpi_data.nr_redist_regions, 0, gsi_domain_handle);
2434     if (err)
2435         goto out_fwhandle_free;
2436 
2437     acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2438 
2439     if (static_branch_likely(&supports_deactivate_key))
2440         gic_acpi_setup_kvm_info();
2441 
2442     return 0;
2443 
2444 out_fwhandle_free:
2445     irq_domain_free_fwnode(gsi_domain_handle);
2446 out_redist_unmap:
2447     for (i = 0; i < acpi_data.nr_redist_regions; i++)
2448         if (acpi_data.redist_regs[i].redist_base)
2449             iounmap(acpi_data.redist_regs[i].redist_base);
2450     kfree(acpi_data.redist_regs);
2451 out_dist_unmap:
2452     iounmap(acpi_data.dist_base);
2453     return err;
2454 }
2455 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2456              acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2457              gic_acpi_init);
2458 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2459              acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2460              gic_acpi_init);
2461 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2462              acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2463              gic_acpi_init);
2464 #endif