0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
0004  * Author: Marc Zyngier <marc.zyngier@arm.com>
0005  */
0006 
0007 #include <linux/acpi.h>
0008 #include <linux/acpi_iort.h>
0009 #include <linux/bitfield.h>
0010 #include <linux/bitmap.h>
0011 #include <linux/cpu.h>
0012 #include <linux/crash_dump.h>
0013 #include <linux/delay.h>
0014 #include <linux/dma-iommu.h>
0015 #include <linux/efi.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/iopoll.h>
0018 #include <linux/irqdomain.h>
0019 #include <linux/list.h>
0020 #include <linux/log2.h>
0021 #include <linux/memblock.h>
0022 #include <linux/mm.h>
0023 #include <linux/msi.h>
0024 #include <linux/of.h>
0025 #include <linux/of_address.h>
0026 #include <linux/of_irq.h>
0027 #include <linux/of_pci.h>
0028 #include <linux/of_platform.h>
0029 #include <linux/percpu.h>
0030 #include <linux/slab.h>
0031 #include <linux/syscore_ops.h>
0032 
0033 #include <linux/irqchip.h>
0034 #include <linux/irqchip/arm-gic-v3.h>
0035 #include <linux/irqchip/arm-gic-v4.h>
0036 
0037 #include <asm/cputype.h>
0038 #include <asm/exception.h>
0039 
0040 #include "irq-gic-common.h"
0041 
0042 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING       (1ULL << 0)
0043 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375   (1ULL << 1)
0044 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144   (1ULL << 2)
0045 
0046 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
0047 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED  (1 << 1)
0048 
0049 #define RD_LOCAL_LPI_ENABLED                    BIT(0)
0050 #define RD_LOCAL_PENDTABLE_PREALLOCATED         BIT(1)
0051 #define RD_LOCAL_MEMRESERVE_DONE                BIT(2)
0052 
0053 static u32 lpi_id_bits;
0054 
0055 /*
0056  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
0057  * deal with (one configuration byte per interrupt). PENDBASE has to
0058  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
0059  */
0060 #define LPI_NRBITS      lpi_id_bits
0061 #define LPI_PROPBASE_SZ     ALIGN(BIT(LPI_NRBITS), SZ_64K)
0062 #define LPI_PENDBASE_SZ     ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
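/*
 * Worked example, assuming lpi_id_bits == 16: LPI_PROPBASE_SZ is 64kB
 * (2^16 one-byte configuration entries) and LPI_PENDBASE_SZ is 8kB of
 * pending bits, rounded up to the required 64kB alignment.
 */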
0063 
0064 #define LPI_PROP_DEFAULT_PRIO   GICD_INT_DEF_PRI
0065 
0066 /*
0067  * Collection structure - just an ID, and a redistributor address to
0068  * ping. We use one per CPU as a bag of interrupts assigned to this
0069  * CPU.
0070  */
0071 struct its_collection {
0072     u64         target_address;
0073     u16         col_id;
0074 };
0075 
0076 /*
0077  * The ITS_BASER structure - contains memory information, cached
0078  * value of BASER register configuration and ITS page size.
0079  */
0080 struct its_baser {
0081     void        *base;
0082     u64     val;
0083     u32     order;
0084     u32     psz;
0085 };
0086 
0087 struct its_device;
0088 
0089 /*
0090  * The ITS structure - contains most of the infrastructure, with the
0091  * top-level MSI domain, the command queue, the collections, and the
0092  * list of devices writing to it.
0093  *
0094  * dev_alloc_lock has to be taken for device allocations, while the
0095  * spinlock must be taken to parse data structures such as the device
0096  * list.
0097  */
0098 struct its_node {
0099     raw_spinlock_t      lock;
0100     struct mutex        dev_alloc_lock;
0101     struct list_head    entry;
0102     void __iomem        *base;
0103     void __iomem        *sgir_base;
0104     phys_addr_t     phys_base;
0105     struct its_cmd_block    *cmd_base;
0106     struct its_cmd_block    *cmd_write;
0107     struct its_baser    tables[GITS_BASER_NR_REGS];
0108     struct its_collection   *collections;
0109     struct fwnode_handle    *fwnode_handle;
0110     u64         (*get_msi_base)(struct its_device *its_dev);
0111     u64         typer;
0112     u64         cbaser_save;
0113     u32         ctlr_save;
0114     u32         mpidr;
0115     struct list_head    its_device_list;
0116     u64         flags;
0117     unsigned long       list_nr;
0118     int         numa_node;
0119     unsigned int        msi_domain_flags;
0120     u32         pre_its_base; /* for Socionext Synquacer */
0121     int         vlpi_redist_offset;
0122 };
0123 
0124 #define is_v4(its)      (!!((its)->typer & GITS_TYPER_VLPIS))
0125 #define is_v4_1(its)        (!!((its)->typer & GITS_TYPER_VMAPP))
0126 #define device_ids(its)     (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
0127 
0128 #define ITS_ITT_ALIGN       SZ_256
0129 
0130 /* The maximum number of VPEID bits supported by VLPI commands */
0131 #define ITS_MAX_VPEID_BITS                      \
0132     ({                              \
0133         int nvpeid = 16;                    \
0134         if (gic_rdists->has_rvpeid &&               \
0135             gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)      \
0136             nvpeid = 1 + (gic_rdists->gicd_typer2 &     \
0137                       GICD_TYPER2_VID);         \
0138                                     \
0139         nvpeid;                         \
0140     })
0141 #define ITS_MAX_VPEID       (1 << (ITS_MAX_VPEID_BITS))
0142 
0143 /* Convert page order to size in bytes */
0144 #define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))
0145 
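/*
 * Per-device event map: lpi_map is the bitmap of allocated events,
 * col_map records the target collection for each event, and the vm /
 * vlpi_maps fields describe any GICv4 forwarding to a guest.
 */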
0146 struct event_lpi_map {
0147     unsigned long       *lpi_map;
0148     u16         *col_map;
0149     irq_hw_number_t     lpi_base;
0150     int         nr_lpis;
0151     raw_spinlock_t      vlpi_lock;
0152     struct its_vm       *vm;
0153     struct its_vlpi_map *vlpi_maps;
0154     int         nr_vlpis;
0155 };
0156 
0157 /*
0158  * The ITS view of a device - belongs to an ITS, owns an interrupt
0159  * translation table, and a list of interrupts. If some of its
0160  * LPIs are injected into a guest (GICv4), the event_map.vm field
0161  * indicates which one.
0162  */
0163 struct its_device {
0164     struct list_head    entry;
0165     struct its_node     *its;
0166     struct event_lpi_map    event_map;
0167     void            *itt;
0168     u32         nr_ites;
0169     u32         device_id;
0170     bool            shared;
0171 };
0172 
0173 static struct {
0174     raw_spinlock_t      lock;
0175     struct its_device   *dev;
0176     struct its_vpe      **vpes;
0177     int         next_victim;
0178 } vpe_proxy;
0179 
0180 struct cpu_lpi_count {
0181     atomic_t    managed;
0182     atomic_t    unmanaged;
0183 };
0184 
0185 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
0186 
0187 static LIST_HEAD(its_nodes);
0188 static DEFINE_RAW_SPINLOCK(its_lock);
0189 static struct rdists *gic_rdists;
0190 static struct irq_domain *its_parent;
0191 
0192 static unsigned long its_list_map;
0193 static u16 vmovp_seq_num;
0194 static DEFINE_RAW_SPINLOCK(vmovp_lock);
0195 
0196 static DEFINE_IDA(its_vpeid_ida);
0197 
0198 #define gic_data_rdist()        (raw_cpu_ptr(gic_rdists->rdist))
0199 #define gic_data_rdist_cpu(cpu)     (per_cpu_ptr(gic_rdists->rdist, cpu))
0200 #define gic_data_rdist_rd_base()    (gic_data_rdist()->rd_base)
0201 #define gic_data_rdist_vlpi_base()  (gic_data_rdist_rd_base() + SZ_128K)
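/*
 * On GICv4, the VLPI register frame sits two 64kB frames (RD_base and
 * SGI_base) above RD_base, hence the SZ_128K offset.
 */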
0202 
0203 /*
0204  * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
0205  * always have vSGIs mapped.
0206  */
0207 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
0208 {
0209     return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
0210 }
0211 
0212 static u16 get_its_list(struct its_vm *vm)
0213 {
0214     struct its_node *its;
0215     unsigned long its_list = 0;
0216 
0217     list_for_each_entry(its, &its_nodes, entry) {
0218         if (!is_v4(its))
0219             continue;
0220 
0221         if (require_its_list_vmovp(vm, its))
0222             __set_bit(its->list_nr, &its_list);
0223     }
0224 
0225     return (u16)its_list;
0226 }
0227 
0228 static inline u32 its_get_event_id(struct irq_data *d)
0229 {
0230     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
0231     return d->hwirq - its_dev->event_map.lpi_base;
0232 }
0233 
0234 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
0235                            u32 event)
0236 {
0237     struct its_node *its = its_dev->its;
0238 
0239     return its->collections + its_dev->event_map.col_map[event];
0240 }
0241 
0242 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
0243                            u32 event)
0244 {
0245     if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
0246         return NULL;
0247 
0248     return &its_dev->event_map.vlpi_maps[event];
0249 }
0250 
0251 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
0252 {
0253     if (irqd_is_forwarded_to_vcpu(d)) {
0254         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
0255         u32 event = its_get_event_id(d);
0256 
0257         return dev_event_to_vlpi_map(its_dev, event);
0258     }
0259 
0260     return NULL;
0261 }
0262 
0263 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
0264 {
0265     raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
0266     return vpe->col_idx;
0267 }
0268 
0269 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
0270 {
0271     raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
0272 }
0273 
0274 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
0275 {
0276     struct its_vlpi_map *map = get_vlpi_map(d);
0277     int cpu;
0278 
0279     if (map) {
0280         cpu = vpe_to_cpuid_lock(map->vpe, flags);
0281     } else {
0282         /* Physical LPIs are already locked via the irq_desc lock */
0283         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
0284         cpu = its_dev->event_map.col_map[its_get_event_id(d)];
0285         /* Keep GCC quiet... */
0286         *flags = 0;
0287     }
0288 
0289     return cpu;
0290 }
0291 
0292 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
0293 {
0294     struct its_vlpi_map *map = get_vlpi_map(d);
0295 
0296     if (map)
0297         vpe_to_cpuid_unlock(map->vpe, flags);
0298 }
0299 
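/*
 * Collection targets are encoded with their low 16 bits shifted out
 * (see its_encode_target()), so any bits set in [15:0] mean the
 * collection was never properly initialised.
 */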
0300 static struct its_collection *valid_col(struct its_collection *col)
0301 {
0302     if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
0303         return NULL;
0304 
0305     return col;
0306 }
0307 
0308 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
0309 {
0310     if (valid_col(its->collections + vpe->col_idx))
0311         return vpe;
0312 
0313     return NULL;
0314 }
0315 
0316 /*
0317  * ITS command descriptors - parameters to be encoded in a command
0318  * block.
0319  */
0320 struct its_cmd_desc {
0321     union {
0322         struct {
0323             struct its_device *dev;
0324             u32 event_id;
0325         } its_inv_cmd;
0326 
0327         struct {
0328             struct its_device *dev;
0329             u32 event_id;
0330         } its_clear_cmd;
0331 
0332         struct {
0333             struct its_device *dev;
0334             u32 event_id;
0335         } its_int_cmd;
0336 
0337         struct {
0338             struct its_device *dev;
0339             int valid;
0340         } its_mapd_cmd;
0341 
0342         struct {
0343             struct its_collection *col;
0344             int valid;
0345         } its_mapc_cmd;
0346 
0347         struct {
0348             struct its_device *dev;
0349             u32 phys_id;
0350             u32 event_id;
0351         } its_mapti_cmd;
0352 
0353         struct {
0354             struct its_device *dev;
0355             struct its_collection *col;
0356             u32 event_id;
0357         } its_movi_cmd;
0358 
0359         struct {
0360             struct its_device *dev;
0361             u32 event_id;
0362         } its_discard_cmd;
0363 
0364         struct {
0365             struct its_collection *col;
0366         } its_invall_cmd;
0367 
0368         struct {
0369             struct its_vpe *vpe;
0370         } its_vinvall_cmd;
0371 
0372         struct {
0373             struct its_vpe *vpe;
0374             struct its_collection *col;
0375             bool valid;
0376         } its_vmapp_cmd;
0377 
0378         struct {
0379             struct its_vpe *vpe;
0380             struct its_device *dev;
0381             u32 virt_id;
0382             u32 event_id;
0383             bool db_enabled;
0384         } its_vmapti_cmd;
0385 
0386         struct {
0387             struct its_vpe *vpe;
0388             struct its_device *dev;
0389             u32 event_id;
0390             bool db_enabled;
0391         } its_vmovi_cmd;
0392 
0393         struct {
0394             struct its_vpe *vpe;
0395             struct its_collection *col;
0396             u16 seq_num;
0397             u16 its_list;
0398         } its_vmovp_cmd;
0399 
0400         struct {
0401             struct its_vpe *vpe;
0402         } its_invdb_cmd;
0403 
0404         struct {
0405             struct its_vpe *vpe;
0406             u8 sgi;
0407             u8 priority;
0408             bool enable;
0409             bool group;
0410             bool clear;
0411         } its_vsgi_cmd;
0412     };
0413 };
0414 
0415 /*
0416  * The ITS command block, which is what the ITS actually parses.
0417  */
0418 struct its_cmd_block {
0419     union {
0420         u64 raw_cmd[4];
0421         __le64  raw_cmd_le[4];
0422     };
0423 };
0424 
0425 #define ITS_CMD_QUEUE_SZ        SZ_64K
0426 #define ITS_CMD_QUEUE_NR_ENTRIES    (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
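/* Each command block is 4 x 64bit = 32 bytes, so a 64kB queue holds 2048 entries */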
0427 
0428 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
0429                             struct its_cmd_block *,
0430                             struct its_cmd_desc *);
0431 
0432 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
0433                           struct its_cmd_block *,
0434                           struct its_cmd_desc *);
0435 
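/*
 * Insert @val into bits [h:l] of *raw_cmd, clearing those bits first.
 * For example, its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32) places
 * the DeviceID in the top half of the first command doubleword.
 */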
0436 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
0437 {
0438     u64 mask = GENMASK_ULL(h, l);
0439     *raw_cmd &= ~mask;
0440     *raw_cmd |= (val << l) & mask;
0441 }
0442 
0443 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
0444 {
0445     its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
0446 }
0447 
0448 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
0449 {
0450     its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
0451 }
0452 
0453 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
0454 {
0455     its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
0456 }
0457 
0458 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
0459 {
0460     its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
0461 }
0462 
0463 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
0464 {
0465     its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
0466 }
0467 
0468 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
0469 {
0470     its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
0471 }
0472 
0473 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
0474 {
0475     its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
0476 }
0477 
0478 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
0479 {
0480     its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
0481 }
0482 
0483 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
0484 {
0485     its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
0486 }
0487 
0488 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
0489 {
0490     its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
0491 }
0492 
0493 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
0494 {
0495     its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
0496 }
0497 
0498 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
0499 {
0500     its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
0501 }
0502 
0503 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
0504 {
0505     its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
0506 }
0507 
0508 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
0509 {
0510     its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
0511 }
0512 
0513 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
0514 {
0515     its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
0516 }
0517 
0518 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
0519 {
0520     its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
0521 }
0522 
0523 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
0524 {
0525     its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
0526 }
0527 
0528 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
0529 {
0530     its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
0531 }
0532 
0533 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
0534 {
0535     its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
0536 }
0537 
0538 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
0539 {
0540     its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
0541 }
0542 
0543 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
0544                     u32 vpe_db_lpi)
0545 {
0546     its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
0547 }
0548 
0549 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
0550                     u32 vpe_db_lpi)
0551 {
0552     its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
0553 }
0554 
0555 static void its_encode_db(struct its_cmd_block *cmd, bool db)
0556 {
0557     its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
0558 }
0559 
0560 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
0561 {
0562     its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
0563 }
0564 
0565 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
0566 {
0567     its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
0568 }
0569 
0570 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
0571 {
0572     its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
0573 }
0574 
0575 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
0576 {
0577     its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
0578 }
0579 
0580 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
0581 {
0582     its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
0583 }
0584 
0585 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
0586 {
0587     /* Let's fixup BE commands */
0588     cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
0589     cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
0590     cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
0591     cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
0592 }
0593 
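/*
 * MAPD encodes the ITT size as (number of EventID bits - 1), hence the
 * "size - 1" below, and the ITT address must be 256-byte aligned
 * (ITS_ITT_ALIGN).
 */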
0594 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
0595                          struct its_cmd_block *cmd,
0596                          struct its_cmd_desc *desc)
0597 {
0598     unsigned long itt_addr;
0599     u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
0600 
0601     itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
0602     itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
0603 
0604     its_encode_cmd(cmd, GITS_CMD_MAPD);
0605     its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
0606     its_encode_size(cmd, size - 1);
0607     its_encode_itt(cmd, itt_addr);
0608     its_encode_valid(cmd, desc->its_mapd_cmd.valid);
0609 
0610     its_fixup_cmd(cmd);
0611 
0612     return NULL;
0613 }
0614 
0615 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
0616                          struct its_cmd_block *cmd,
0617                          struct its_cmd_desc *desc)
0618 {
0619     its_encode_cmd(cmd, GITS_CMD_MAPC);
0620     its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
0621     its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
0622     its_encode_valid(cmd, desc->its_mapc_cmd.valid);
0623 
0624     its_fixup_cmd(cmd);
0625 
0626     return desc->its_mapc_cmd.col;
0627 }
0628 
0629 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
0630                           struct its_cmd_block *cmd,
0631                           struct its_cmd_desc *desc)
0632 {
0633     struct its_collection *col;
0634 
0635     col = dev_event_to_col(desc->its_mapti_cmd.dev,
0636                    desc->its_mapti_cmd.event_id);
0637 
0638     its_encode_cmd(cmd, GITS_CMD_MAPTI);
0639     its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
0640     its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
0641     its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
0642     its_encode_collection(cmd, col->col_id);
0643 
0644     its_fixup_cmd(cmd);
0645 
0646     return valid_col(col);
0647 }
0648 
0649 static struct its_collection *its_build_movi_cmd(struct its_node *its,
0650                          struct its_cmd_block *cmd,
0651                          struct its_cmd_desc *desc)
0652 {
0653     struct its_collection *col;
0654 
0655     col = dev_event_to_col(desc->its_movi_cmd.dev,
0656                    desc->its_movi_cmd.event_id);
0657 
0658     its_encode_cmd(cmd, GITS_CMD_MOVI);
0659     its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
0660     its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
0661     its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
0662 
0663     its_fixup_cmd(cmd);
0664 
0665     return valid_col(col);
0666 }
0667 
0668 static struct its_collection *its_build_discard_cmd(struct its_node *its,
0669                             struct its_cmd_block *cmd,
0670                             struct its_cmd_desc *desc)
0671 {
0672     struct its_collection *col;
0673 
0674     col = dev_event_to_col(desc->its_discard_cmd.dev,
0675                    desc->its_discard_cmd.event_id);
0676 
0677     its_encode_cmd(cmd, GITS_CMD_DISCARD);
0678     its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
0679     its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
0680 
0681     its_fixup_cmd(cmd);
0682 
0683     return valid_col(col);
0684 }
0685 
0686 static struct its_collection *its_build_inv_cmd(struct its_node *its,
0687                         struct its_cmd_block *cmd,
0688                         struct its_cmd_desc *desc)
0689 {
0690     struct its_collection *col;
0691 
0692     col = dev_event_to_col(desc->its_inv_cmd.dev,
0693                    desc->its_inv_cmd.event_id);
0694 
0695     its_encode_cmd(cmd, GITS_CMD_INV);
0696     its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
0697     its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
0698 
0699     its_fixup_cmd(cmd);
0700 
0701     return valid_col(col);
0702 }
0703 
0704 static struct its_collection *its_build_int_cmd(struct its_node *its,
0705                         struct its_cmd_block *cmd,
0706                         struct its_cmd_desc *desc)
0707 {
0708     struct its_collection *col;
0709 
0710     col = dev_event_to_col(desc->its_int_cmd.dev,
0711                    desc->its_int_cmd.event_id);
0712 
0713     its_encode_cmd(cmd, GITS_CMD_INT);
0714     its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
0715     its_encode_event_id(cmd, desc->its_int_cmd.event_id);
0716 
0717     its_fixup_cmd(cmd);
0718 
0719     return valid_col(col);
0720 }
0721 
0722 static struct its_collection *its_build_clear_cmd(struct its_node *its,
0723                           struct its_cmd_block *cmd,
0724                           struct its_cmd_desc *desc)
0725 {
0726     struct its_collection *col;
0727 
0728     col = dev_event_to_col(desc->its_clear_cmd.dev,
0729                    desc->its_clear_cmd.event_id);
0730 
0731     its_encode_cmd(cmd, GITS_CMD_CLEAR);
0732     its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
0733     its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
0734 
0735     its_fixup_cmd(cmd);
0736 
0737     return valid_col(col);
0738 }
0739 
0740 static struct its_collection *its_build_invall_cmd(struct its_node *its,
0741                            struct its_cmd_block *cmd,
0742                            struct its_cmd_desc *desc)
0743 {
0744     its_encode_cmd(cmd, GITS_CMD_INVALL);
0745     its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
0746 
0747     its_fixup_cmd(cmd);
0748 
0749     return desc->its_invall_cmd.col;
0750 }
0751 
0752 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
0753                          struct its_cmd_block *cmd,
0754                          struct its_cmd_desc *desc)
0755 {
0756     its_encode_cmd(cmd, GITS_CMD_VINVALL);
0757     its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
0758 
0759     its_fixup_cmd(cmd);
0760 
0761     return valid_vpe(its, desc->its_vinvall_cmd.vpe);
0762 }
0763 
0764 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
0765                        struct its_cmd_block *cmd,
0766                        struct its_cmd_desc *desc)
0767 {
0768     unsigned long vpt_addr, vconf_addr;
0769     u64 target;
0770     bool alloc;
0771 
0772     its_encode_cmd(cmd, GITS_CMD_VMAPP);
0773     its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
0774     its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
0775 
0776     if (!desc->its_vmapp_cmd.valid) {
0777         if (is_v4_1(its)) {
0778             alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
0779             its_encode_alloc(cmd, alloc);
0780         }
0781 
0782         goto out;
0783     }
0784 
0785     vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
0786     target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
0787 
0788     its_encode_target(cmd, target);
0789     its_encode_vpt_addr(cmd, vpt_addr);
0790     its_encode_vpt_size(cmd, LPI_NRBITS - 1);
0791 
0792     if (!is_v4_1(its))
0793         goto out;
0794 
0795     vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
0796 
0797     alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
0798 
0799     its_encode_alloc(cmd, alloc);
0800 
0801     /*
0802      * GICv4.1 provides a way to get the VLPI state, which needs the vPE
0803      * to be unmapped first, and in this case, we may remap the vPE
0804      * back while the VPT is not empty. So we can't assume that the
0805      * VPT is empty on map. This is why we never advertise PTZ.
0806      */
0807     its_encode_ptz(cmd, false);
0808     its_encode_vconf_addr(cmd, vconf_addr);
0809     its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
0810 
0811 out:
0812     its_fixup_cmd(cmd);
0813 
0814     return valid_vpe(its, desc->its_vmapp_cmd.vpe);
0815 }
0816 
0817 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
0818                         struct its_cmd_block *cmd,
0819                         struct its_cmd_desc *desc)
0820 {
0821     u32 db;
0822 
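    /*
     * INTID 1023 is the architectural "no interrupt" value: using it as
     * the doorbell suppresses doorbell generation for this VLPI. GICv4.1
     * relies on the per-vPE default doorbell set up at VMAPP time instead.
     */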
0823     if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
0824         db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
0825     else
0826         db = 1023;
0827 
0828     its_encode_cmd(cmd, GITS_CMD_VMAPTI);
0829     its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
0830     its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
0831     its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
0832     its_encode_db_phys_id(cmd, db);
0833     its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
0834 
0835     its_fixup_cmd(cmd);
0836 
0837     return valid_vpe(its, desc->its_vmapti_cmd.vpe);
0838 }
0839 
0840 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
0841                        struct its_cmd_block *cmd,
0842                        struct its_cmd_desc *desc)
0843 {
0844     u32 db;
0845 
0846     if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
0847         db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
0848     else
0849         db = 1023;
0850 
0851     its_encode_cmd(cmd, GITS_CMD_VMOVI);
0852     its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
0853     its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
0854     its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
0855     its_encode_db_phys_id(cmd, db);
0856     its_encode_db_valid(cmd, true);
0857 
0858     its_fixup_cmd(cmd);
0859 
0860     return valid_vpe(its, desc->its_vmovi_cmd.vpe);
0861 }
0862 
0863 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
0864                        struct its_cmd_block *cmd,
0865                        struct its_cmd_desc *desc)
0866 {
0867     u64 target;
0868 
0869     target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
0870     its_encode_cmd(cmd, GITS_CMD_VMOVP);
0871     its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
0872     its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
0873     its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
0874     its_encode_target(cmd, target);
0875 
0876     if (is_v4_1(its)) {
0877         its_encode_db(cmd, true);
0878         its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
0879     }
0880 
0881     its_fixup_cmd(cmd);
0882 
0883     return valid_vpe(its, desc->its_vmovp_cmd.vpe);
0884 }
0885 
0886 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
0887                       struct its_cmd_block *cmd,
0888                       struct its_cmd_desc *desc)
0889 {
0890     struct its_vlpi_map *map;
0891 
0892     map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
0893                     desc->its_inv_cmd.event_id);
0894 
0895     its_encode_cmd(cmd, GITS_CMD_INV);
0896     its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
0897     its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
0898 
0899     its_fixup_cmd(cmd);
0900 
0901     return valid_vpe(its, map->vpe);
0902 }
0903 
0904 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
0905                       struct its_cmd_block *cmd,
0906                       struct its_cmd_desc *desc)
0907 {
0908     struct its_vlpi_map *map;
0909 
0910     map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
0911                     desc->its_int_cmd.event_id);
0912 
0913     its_encode_cmd(cmd, GITS_CMD_INT);
0914     its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
0915     its_encode_event_id(cmd, desc->its_int_cmd.event_id);
0916 
0917     its_fixup_cmd(cmd);
0918 
0919     return valid_vpe(its, map->vpe);
0920 }
0921 
0922 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
0923                         struct its_cmd_block *cmd,
0924                         struct its_cmd_desc *desc)
0925 {
0926     struct its_vlpi_map *map;
0927 
0928     map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
0929                     desc->its_clear_cmd.event_id);
0930 
0931     its_encode_cmd(cmd, GITS_CMD_CLEAR);
0932     its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
0933     its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
0934 
0935     its_fixup_cmd(cmd);
0936 
0937     return valid_vpe(its, map->vpe);
0938 }
0939 
0940 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
0941                        struct its_cmd_block *cmd,
0942                        struct its_cmd_desc *desc)
0943 {
0944     if (WARN_ON(!is_v4_1(its)))
0945         return NULL;
0946 
0947     its_encode_cmd(cmd, GITS_CMD_INVDB);
0948     its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
0949 
0950     its_fixup_cmd(cmd);
0951 
0952     return valid_vpe(its, desc->its_invdb_cmd.vpe);
0953 }
0954 
0955 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
0956                       struct its_cmd_block *cmd,
0957                       struct its_cmd_desc *desc)
0958 {
0959     if (WARN_ON(!is_v4_1(its)))
0960         return NULL;
0961 
0962     its_encode_cmd(cmd, GITS_CMD_VSGI);
0963     its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
0964     its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
0965     its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
0966     its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
0967     its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
0968     its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
0969 
0970     its_fixup_cmd(cmd);
0971 
0972     return valid_vpe(its, desc->its_vsgi_cmd.vpe);
0973 }
0974 
0975 static u64 its_cmd_ptr_to_offset(struct its_node *its,
0976                  struct its_cmd_block *ptr)
0977 {
0978     return (ptr - its->cmd_base) * sizeof(*ptr);
0979 }
0980 
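/*
 * GITS_CREADR and GITS_CWRITER hold byte offsets into the command
 * queue, hence the conversions to and from entry indices below.
 */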
0981 static int its_queue_full(struct its_node *its)
0982 {
0983     int widx;
0984     int ridx;
0985 
0986     widx = its->cmd_write - its->cmd_base;
0987     ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
0988 
0989     /* This is incredibly unlikely to happen, unless the ITS locks up. */
0990     if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
0991         return 1;
0992 
0993     return 0;
0994 }
0995 
0996 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
0997 {
0998     struct its_cmd_block *cmd;
0999     u32 count = 1000000;    /* 1s! */
1000 
1001     while (its_queue_full(its)) {
1002         count--;
1003         if (!count) {
1004             pr_err_ratelimited("ITS queue not draining\n");
1005             return NULL;
1006         }
1007         cpu_relax();
1008         udelay(1);
1009     }
1010 
1011     cmd = its->cmd_write++;
1012 
1013     /* Handle queue wrapping */
1014     if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1015         its->cmd_write = its->cmd_base;
1016 
1017     /* Clear command */
1018     cmd->raw_cmd[0] = 0;
1019     cmd->raw_cmd[1] = 0;
1020     cmd->raw_cmd[2] = 0;
1021     cmd->raw_cmd[3] = 0;
1022 
1023     return cmd;
1024 }
1025 
1026 static struct its_cmd_block *its_post_commands(struct its_node *its)
1027 {
1028     u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1029 
1030     writel_relaxed(wr, its->base + GITS_CWRITER);
1031 
1032     return its->cmd_write;
1033 }
1034 
1035 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1036 {
1037     /*
1038      * Make sure the commands written to memory are observable by
1039      * the ITS.
1040      */
1041     if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1042         gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1043     else
1044         dsb(ishst);
1045 }
1046 
1047 static int its_wait_for_range_completion(struct its_node *its,
1048                      u64    prev_idx,
1049                      struct its_cmd_block *to)
1050 {
1051     u64 rd_idx, to_idx, linear_idx;
1052     u32 count = 1000000;    /* 1s! */
1053 
1054     /* Linearize to_idx if the command set has wrapped around */
1055     to_idx = its_cmd_ptr_to_offset(its, to);
1056     if (to_idx < prev_idx)
1057         to_idx += ITS_CMD_QUEUE_SZ;
1058 
1059     linear_idx = prev_idx;
1060 
1061     while (1) {
1062         s64 delta;
1063 
1064         rd_idx = readl_relaxed(its->base + GITS_CREADR);
1065 
1066         /*
1067          * Compute the read pointer progress, taking the
1068          * potential wrap-around into account.
1069          */
1070         delta = rd_idx - prev_idx;
1071         if (rd_idx < prev_idx)
1072             delta += ITS_CMD_QUEUE_SZ;
1073 
1074         linear_idx += delta;
1075         if (linear_idx >= to_idx)
1076             break;
1077 
1078         count--;
1079         if (!count) {
1080             pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1081                        to_idx, linear_idx);
1082             return -1;
1083         }
1084         prev_idx = rd_idx;
1085         cpu_relax();
1086         udelay(1);
1087     }
1088 
1089     return 0;
1090 }
1091 
1092 /* Warning, macro hell follows */
1093 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)   \
1094 void name(struct its_node *its,                     \
1095       buildtype builder,                        \
1096       struct its_cmd_desc *desc)                    \
1097 {                                   \
1098     struct its_cmd_block *cmd, *sync_cmd, *next_cmd;        \
1099     synctype *sync_obj;                     \
1100     unsigned long flags;                        \
1101     u64 rd_idx;                         \
1102                                     \
1103     raw_spin_lock_irqsave(&its->lock, flags);           \
1104                                     \
1105     cmd = its_allocate_entry(its);                  \
1106     if (!cmd) {     /* We're soooooo screwed... */      \
1107         raw_spin_unlock_irqrestore(&its->lock, flags);      \
1108         return;                         \
1109     }                               \
1110     sync_obj = builder(its, cmd, desc);             \
1111     its_flush_cmd(its, cmd);                    \
1112                                     \
1113     if (sync_obj) {                         \
1114         sync_cmd = its_allocate_entry(its);         \
1115         if (!sync_cmd)                      \
1116             goto post;                  \
1117                                     \
1118         buildfn(its, sync_cmd, sync_obj);           \
1119         its_flush_cmd(its, sync_cmd);               \
1120     }                               \
1121                                     \
1122 post:                                   \
1123     rd_idx = readl_relaxed(its->base + GITS_CREADR);        \
1124     next_cmd = its_post_commands(its);              \
1125     raw_spin_unlock_irqrestore(&its->lock, flags);          \
1126                                     \
1127     if (its_wait_for_range_completion(its, rd_idx, next_cmd))   \
1128         pr_err_ratelimited("ITS cmd %ps failed\n", builder);    \
1129 }
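/*
 * BUILD_SINGLE_CMD_FUNC() is instantiated twice below: once for
 * physical commands (followed by a SYNC) and once for virtual ones
 * (followed by a VSYNC).
 */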
1130 
1131 static void its_build_sync_cmd(struct its_node *its,
1132                    struct its_cmd_block *sync_cmd,
1133                    struct its_collection *sync_col)
1134 {
1135     its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1136     its_encode_target(sync_cmd, sync_col->target_address);
1137 
1138     its_fixup_cmd(sync_cmd);
1139 }
1140 
1141 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1142                  struct its_collection, its_build_sync_cmd)
1143 
1144 static void its_build_vsync_cmd(struct its_node *its,
1145                 struct its_cmd_block *sync_cmd,
1146                 struct its_vpe *sync_vpe)
1147 {
1148     its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1149     its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1150 
1151     its_fixup_cmd(sync_cmd);
1152 }
1153 
1154 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1155                  struct its_vpe, its_build_vsync_cmd)
1156 
1157 static void its_send_int(struct its_device *dev, u32 event_id)
1158 {
1159     struct its_cmd_desc desc;
1160 
1161     desc.its_int_cmd.dev = dev;
1162     desc.its_int_cmd.event_id = event_id;
1163 
1164     its_send_single_command(dev->its, its_build_int_cmd, &desc);
1165 }
1166 
1167 static void its_send_clear(struct its_device *dev, u32 event_id)
1168 {
1169     struct its_cmd_desc desc;
1170 
1171     desc.its_clear_cmd.dev = dev;
1172     desc.its_clear_cmd.event_id = event_id;
1173 
1174     its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1175 }
1176 
1177 static void its_send_inv(struct its_device *dev, u32 event_id)
1178 {
1179     struct its_cmd_desc desc;
1180 
1181     desc.its_inv_cmd.dev = dev;
1182     desc.its_inv_cmd.event_id = event_id;
1183 
1184     its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1185 }
1186 
1187 static void its_send_mapd(struct its_device *dev, int valid)
1188 {
1189     struct its_cmd_desc desc;
1190 
1191     desc.its_mapd_cmd.dev = dev;
1192     desc.its_mapd_cmd.valid = !!valid;
1193 
1194     its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1195 }
1196 
1197 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1198               int valid)
1199 {
1200     struct its_cmd_desc desc;
1201 
1202     desc.its_mapc_cmd.col = col;
1203     desc.its_mapc_cmd.valid = !!valid;
1204 
1205     its_send_single_command(its, its_build_mapc_cmd, &desc);
1206 }
1207 
1208 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1209 {
1210     struct its_cmd_desc desc;
1211 
1212     desc.its_mapti_cmd.dev = dev;
1213     desc.its_mapti_cmd.phys_id = irq_id;
1214     desc.its_mapti_cmd.event_id = id;
1215 
1216     its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1217 }
1218 
1219 static void its_send_movi(struct its_device *dev,
1220               struct its_collection *col, u32 id)
1221 {
1222     struct its_cmd_desc desc;
1223 
1224     desc.its_movi_cmd.dev = dev;
1225     desc.its_movi_cmd.col = col;
1226     desc.its_movi_cmd.event_id = id;
1227 
1228     its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1229 }
1230 
1231 static void its_send_discard(struct its_device *dev, u32 id)
1232 {
1233     struct its_cmd_desc desc;
1234 
1235     desc.its_discard_cmd.dev = dev;
1236     desc.its_discard_cmd.event_id = id;
1237 
1238     its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1239 }
1240 
1241 static void its_send_invall(struct its_node *its, struct its_collection *col)
1242 {
1243     struct its_cmd_desc desc;
1244 
1245     desc.its_invall_cmd.col = col;
1246 
1247     its_send_single_command(its, its_build_invall_cmd, &desc);
1248 }
1249 
1250 static void its_send_vmapti(struct its_device *dev, u32 id)
1251 {
1252     struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1253     struct its_cmd_desc desc;
1254 
1255     desc.its_vmapti_cmd.vpe = map->vpe;
1256     desc.its_vmapti_cmd.dev = dev;
1257     desc.its_vmapti_cmd.virt_id = map->vintid;
1258     desc.its_vmapti_cmd.event_id = id;
1259     desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1260 
1261     its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1262 }
1263 
1264 static void its_send_vmovi(struct its_device *dev, u32 id)
1265 {
1266     struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1267     struct its_cmd_desc desc;
1268 
1269     desc.its_vmovi_cmd.vpe = map->vpe;
1270     desc.its_vmovi_cmd.dev = dev;
1271     desc.its_vmovi_cmd.event_id = id;
1272     desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1273 
1274     its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1275 }
1276 
1277 static void its_send_vmapp(struct its_node *its,
1278                struct its_vpe *vpe, bool valid)
1279 {
1280     struct its_cmd_desc desc;
1281 
1282     desc.its_vmapp_cmd.vpe = vpe;
1283     desc.its_vmapp_cmd.valid = valid;
1284     desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1285 
1286     its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1287 }
1288 
1289 static void its_send_vmovp(struct its_vpe *vpe)
1290 {
1291     struct its_cmd_desc desc = {};
1292     struct its_node *its;
1293     unsigned long flags;
1294     int col_id = vpe->col_idx;
1295 
1296     desc.its_vmovp_cmd.vpe = vpe;
1297 
1298     if (!its_list_map) {
1299         its = list_first_entry(&its_nodes, struct its_node, entry);
1300         desc.its_vmovp_cmd.col = &its->collections[col_id];
1301         its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1302         return;
1303     }
1304 
1305     /*
1306      * Yet another marvel of the architecture. If using the
1307      * its_list "feature", we need to make sure that all ITSs
1308      * receive all VMOVP commands in the same order. The only way
1309      * to guarantee this is to make vmovp a serialization point.
1310      *
1311      * Wall <-- Head.
1312      */
1313     raw_spin_lock_irqsave(&vmovp_lock, flags);
1314 
1315     desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1316     desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1317 
1318     /* Emit VMOVPs */
1319     list_for_each_entry(its, &its_nodes, entry) {
1320         if (!is_v4(its))
1321             continue;
1322 
1323         if (!require_its_list_vmovp(vpe->its_vm, its))
1324             continue;
1325 
1326         desc.its_vmovp_cmd.col = &its->collections[col_id];
1327         its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1328     }
1329 
1330     raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1331 }
1332 
1333 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1334 {
1335     struct its_cmd_desc desc;
1336 
1337     desc.its_vinvall_cmd.vpe = vpe;
1338     its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1339 }
1340 
1341 static void its_send_vinv(struct its_device *dev, u32 event_id)
1342 {
1343     struct its_cmd_desc desc;
1344 
1345     /*
1346      * There is no real VINV command. This is just a normal INV,
1347      * with a VSYNC instead of a SYNC.
1348      */
1349     desc.its_inv_cmd.dev = dev;
1350     desc.its_inv_cmd.event_id = event_id;
1351 
1352     its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1353 }
1354 
1355 static void its_send_vint(struct its_device *dev, u32 event_id)
1356 {
1357     struct its_cmd_desc desc;
1358 
1359     /*
1360      * There is no real VINT command. This is just a normal INT,
1361      * with a VSYNC instead of a SYNC.
1362      */
1363     desc.its_int_cmd.dev = dev;
1364     desc.its_int_cmd.event_id = event_id;
1365 
1366     its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1367 }
1368 
1369 static void its_send_vclear(struct its_device *dev, u32 event_id)
1370 {
1371     struct its_cmd_desc desc;
1372 
1373     /*
1374      * There is no real VCLEAR command. This is just a normal CLEAR,
1375      * with a VSYNC instead of a SYNC.
1376      */
1377     desc.its_clear_cmd.dev = dev;
1378     desc.its_clear_cmd.event_id = event_id;
1379 
1380     its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1381 }
1382 
1383 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1384 {
1385     struct its_cmd_desc desc;
1386 
1387     desc.its_invdb_cmd.vpe = vpe;
1388     its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1389 }
1390 
1391 /*
1392  * irqchip functions - assumes MSI, mostly.
1393  */
1394 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1395 {
1396     struct its_vlpi_map *map = get_vlpi_map(d);
1397     irq_hw_number_t hwirq;
1398     void *va;
1399     u8 *cfg;
1400 
1401     if (map) {
1402         va = page_address(map->vm->vprop_page);
1403         hwirq = map->vintid;
1404 
1405         /* Remember the updated property */
1406         map->properties &= ~clr;
1407         map->properties |= set | LPI_PROP_GROUP1;
1408     } else {
1409         va = gic_rdists->prop_table_va;
1410         hwirq = d->hwirq;
1411     }
1412 
1413     cfg = va + hwirq - 8192;
1414     *cfg &= ~clr;
1415     *cfg |= set | LPI_PROP_GROUP1;
1416 
1417     /*
1418      * Make the above write visible to the redistributors.
1419      * And yes, we're flushing exactly: One. Single. Byte.
1420      * Humpf...
1421      */
1422     if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1423         gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1424     else
1425         dsb(ishst);
1426 }
1427 
1428 static void wait_for_syncr(void __iomem *rdbase)
1429 {
1430     while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1431         cpu_relax();
1432 }
1433 
1434 static void direct_lpi_inv(struct irq_data *d)
1435 {
1436     struct its_vlpi_map *map = get_vlpi_map(d);
1437     void __iomem *rdbase;
1438     unsigned long flags;
1439     u64 val;
1440     int cpu;
1441 
1442     if (map) {
1443         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1444 
1445         WARN_ON(!is_v4_1(its_dev->its));
1446 
1447         val  = GICR_INVLPIR_V;
1448         val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1449         val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1450     } else {
1451         val = d->hwirq;
1452     }
1453 
1454     /* Target the redistributor this LPI is currently routed to */
1455     cpu = irq_to_cpuid_lock(d, &flags);
1456     raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1457     rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1458     gic_write_lpir(val, rdbase + GICR_INVLPIR);
1459 
1460     wait_for_syncr(rdbase);
1461     raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1462     irq_to_cpuid_unlock(d, flags);
1463 }
1464 
1465 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1466 {
1467     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1468 
1469     lpi_write_config(d, clr, set);
1470     if (gic_rdists->has_direct_lpi &&
1471         (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1472         direct_lpi_inv(d);
1473     else if (!irqd_is_forwarded_to_vcpu(d))
1474         its_send_inv(its_dev, its_get_event_id(d));
1475     else
1476         its_send_vinv(its_dev, its_get_event_id(d));
1477 }
1478 
1479 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1480 {
1481     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1482     u32 event = its_get_event_id(d);
1483     struct its_vlpi_map *map;
1484 
1485     /*
1486      * GICv4.1 does away with the per-LPI nonsense, nothing to do
1487      * here.
1488      */
1489     if (is_v4_1(its_dev->its))
1490         return;
1491 
1492     map = dev_event_to_vlpi_map(its_dev, event);
1493 
1494     if (map->db_enabled == enable)
1495         return;
1496 
1497     map->db_enabled = enable;
1498 
1499     /*
1500      * More fun with the architecture:
1501      *
1502      * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1503      * value or to 1023, depending on the enable bit. But that
1504      * would be issuing a mapping for an /existing/ DevID+EventID
1505      * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1506      * to the /same/ vPE, using this opportunity to adjust the
1507      * doorbell. Mouahahahaha. We loves it, Precious.
1508      */
1509     its_send_vmovi(its_dev, event);
1510 }
1511 
1512 static void its_mask_irq(struct irq_data *d)
1513 {
1514     if (irqd_is_forwarded_to_vcpu(d))
1515         its_vlpi_set_doorbell(d, false);
1516 
1517     lpi_update_config(d, LPI_PROP_ENABLED, 0);
1518 }
1519 
1520 static void its_unmask_irq(struct irq_data *d)
1521 {
1522     if (irqd_is_forwarded_to_vcpu(d))
1523         its_vlpi_set_doorbell(d, true);
1524 
1525     lpi_update_config(d, 0, LPI_PROP_ENABLED);
1526 }
1527 
1528 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1529 {
1530     if (irqd_affinity_is_managed(d))
1531         return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1532 
1533     return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1534 }
1535 
1536 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1537 {
1538     if (irqd_affinity_is_managed(d))
1539         atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1540     else
1541         atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1542 }
1543 
1544 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1545 {
1546     if (irqd_affinity_is_managed(d))
1547         atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1548     else
1549         atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1550 }
1551 
1552 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1553                           const struct cpumask *cpu_mask)
1554 {
1555     unsigned int cpu = nr_cpu_ids, tmp;
1556     int count = S32_MAX;
1557 
1558     for_each_cpu(tmp, cpu_mask) {
1559         int this_count = its_read_lpi_count(d, tmp);
1560         if (this_count < count) {
1561             cpu = tmp;
1562                 count = this_count;
1563         }
1564     }
1565 
1566     return cpu;
1567 }
1568 
1569 /*
1570  * As suggested by Thomas Gleixner in:
1571  * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1572  */
1573 static int its_select_cpu(struct irq_data *d,
1574               const struct cpumask *aff_mask)
1575 {
1576     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1577     static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1578     static struct cpumask __tmpmask;
1579     struct cpumask *tmpmask;
1580     unsigned long flags;
1581     int cpu, node;
1582     node = its_dev->its->numa_node;
1583     tmpmask = &__tmpmask;
1584 
1585     raw_spin_lock_irqsave(&tmpmask_lock, flags);
1586 
1587     if (!irqd_affinity_is_managed(d)) {
1588         /* First try the NUMA node */
1589         if (node != NUMA_NO_NODE) {
1590             /*
1591              * Try the intersection of the affinity mask and the
1592              * node mask (and the online mask, just to be safe).
1593              */
1594             cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1595             cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1596 
1597             /*
1598              * Ideally, we would check if the mask is empty, and
1599              * try again on the full node here.
1600              *
1601              * But it turns out that the way ACPI describes the
1602          * affinity for ITSs only deals with memory, and
1603              * not target CPUs, so it cannot describe a single
1604              * ITS placed next to two NUMA nodes.
1605              *
1606          * Instead, just fall back on the online mask. This
1607              * diverges from Thomas' suggestion above.
1608              */
1609             cpu = cpumask_pick_least_loaded(d, tmpmask);
1610             if (cpu < nr_cpu_ids)
1611                 goto out;
1612 
1613             /* If we can't cross sockets, give up */
1614             if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1615                 goto out;
1616 
1617             /* If the above failed, expand the search */
1618         }
1619 
1620         /* Try the intersection of the affinity and online masks */
1621         cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1622 
1623         /* If that doesn't fly, the online mask is the last resort */
1624         if (cpumask_empty(tmpmask))
1625             cpumask_copy(tmpmask, cpu_online_mask);
1626 
1627         cpu = cpumask_pick_least_loaded(d, tmpmask);
1628     } else {
1629         cpumask_copy(tmpmask, aff_mask);
1630 
1631         /* If we cannot cross sockets, limit the search to that node */
1632         if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1633             node != NUMA_NO_NODE)
1634             cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1635 
1636         cpu = cpumask_pick_least_loaded(d, tmpmask);
1637     }
1638 out:
1639     raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1640 
1641     pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1642     return cpu;
1643 }
1644 
1645 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1646                 bool force)
1647 {
1648     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1649     struct its_collection *target_col;
1650     u32 id = its_get_event_id(d);
1651     int cpu, prev_cpu;
1652 
1653     /* A forwarded interrupt should use irq_set_vcpu_affinity */
1654     if (irqd_is_forwarded_to_vcpu(d))
1655         return -EINVAL;
1656 
1657     prev_cpu = its_dev->event_map.col_map[id];
1658     its_dec_lpi_count(d, prev_cpu);
1659 
1660     if (!force)
1661         cpu = its_select_cpu(d, mask_val);
1662     else
1663         cpu = cpumask_pick_least_loaded(d, mask_val);
1664 
1665     if (cpu < 0 || cpu >= nr_cpu_ids)
1666         goto err;
1667 
1668     /* don't set the affinity when the target cpu is the same as the current one */
1669     if (cpu != prev_cpu) {
1670         target_col = &its_dev->its->collections[cpu];
1671         its_send_movi(its_dev, target_col, id);
1672         its_dev->event_map.col_map[id] = cpu;
1673         irq_data_update_effective_affinity(d, cpumask_of(cpu));
1674     }
1675 
1676     its_inc_lpi_count(d, cpu);
1677 
1678     return IRQ_SET_MASK_OK_DONE;
1679 
1680 err:
1681     its_inc_lpi_count(d, prev_cpu);
1682     return -EINVAL;
1683 }
1684 
1685 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1686 {
1687     struct its_node *its = its_dev->its;
1688 
1689     return its->phys_base + GITS_TRANSLATER;
1690 }
1691 
1692 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1693 {
1694     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1695     struct its_node *its;
1696     u64 addr;
1697 
1698     its = its_dev->its;
1699     addr = its->get_msi_base(its_dev);
1700 
1701     msg->address_lo     = lower_32_bits(addr);
1702     msg->address_hi     = upper_32_bits(addr);
1703     msg->data       = its_get_event_id(d);
1704 
1705     iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1706 }
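
/*
 * Worked example of the message composed above (illustrative, made-up
 * addresses): for an ITS with phys_base = 0x08080000, the doorbell is
 * phys_base + GITS_TRANSLATER (0x10040), i.e. 0x08090040. A device
 * owning event 5 on that ITS is therefore programmed with
 * address_hi = 0x00000000, address_lo = 0x08090040 and data = 5; each
 * MSI write it issues is then translated by the ITS into the LPI bound
 * to (DevID, 5).
 */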
1707 
1708 static int its_irq_set_irqchip_state(struct irq_data *d,
1709                      enum irqchip_irq_state which,
1710                      bool state)
1711 {
1712     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1713     u32 event = its_get_event_id(d);
1714 
1715     if (which != IRQCHIP_STATE_PENDING)
1716         return -EINVAL;
1717 
1718     if (irqd_is_forwarded_to_vcpu(d)) {
1719         if (state)
1720             its_send_vint(its_dev, event);
1721         else
1722             its_send_vclear(its_dev, event);
1723     } else {
1724         if (state)
1725             its_send_int(its_dev, event);
1726         else
1727             its_send_clear(its_dev, event);
1728     }
1729 
1730     return 0;
1731 }
1732 
1733 static int its_irq_retrigger(struct irq_data *d)
1734 {
1735     return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1736 }
1737 
1738 /*
1739  * Two favourable cases:
1740  *
1741  * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1742  *     for vSGI delivery
1743  *
1744  * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1745  *     and we're better off mapping all VPEs always
1746  *
1747  * If neither (a) nor (b) is true, then we map vPEs on demand.
1748  *
1749  */
1750 static bool gic_requires_eager_mapping(void)
1751 {
1752     if (!its_list_map || gic_rdists->has_rvpeid)
1753         return true;
1754 
1755     return false;
1756 }
1757 
1758 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1759 {
1760     unsigned long flags;
1761 
1762     if (gic_requires_eager_mapping())
1763         return;
1764 
1765     raw_spin_lock_irqsave(&vmovp_lock, flags);
1766 
1767     /*
1768      * If the VM wasn't mapped yet, iterate over the vpes and get
1769      * them mapped now.
1770      */
1771     vm->vlpi_count[its->list_nr]++;
1772 
1773     if (vm->vlpi_count[its->list_nr] == 1) {
1774         int i;
1775 
1776         for (i = 0; i < vm->nr_vpes; i++) {
1777             struct its_vpe *vpe = vm->vpes[i];
1778             struct irq_data *d = irq_get_irq_data(vpe->irq);
1779 
1780             /* Map the VPE to the first possible CPU */
1781             vpe->col_idx = cpumask_first(cpu_online_mask);
1782             its_send_vmapp(its, vpe, true);
1783             its_send_vinvall(its, vpe);
1784             irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1785         }
1786     }
1787 
1788     raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1789 }
1790 
1791 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1792 {
1793     unsigned long flags;
1794 
1795     /* Not using the ITS list? Everything is always mapped. */
1796     if (gic_requires_eager_mapping())
1797         return;
1798 
1799     raw_spin_lock_irqsave(&vmovp_lock, flags);
1800 
1801     if (!--vm->vlpi_count[its->list_nr]) {
1802         int i;
1803 
1804         for (i = 0; i < vm->nr_vpes; i++)
1805             its_send_vmapp(its, vm->vpes[i], false);
1806     }
1807 
1808     raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1809 }
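
/*
 * Illustrative trace of the per-ITS refcounting above (made-up
 * numbers): a VM with two vPEs gets its first VLPI routed through
 * ITS0, so vlpi_count[ITS0] goes 0 -> 1 and its_map_vm() VMAPPs both
 * vPEs on ITS0. A second VLPI on the same ITS only bumps the count to
 * 2. When both VLPIs are torn down, the count drops back to 0 and
 * its_unmap_vm() issues the unmapping VMAPP for each vPE.
 */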
1810 
1811 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1812 {
1813     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1814     u32 event = its_get_event_id(d);
1815     int ret = 0;
1816 
1817     if (!info->map)
1818         return -EINVAL;
1819 
1820     raw_spin_lock(&its_dev->event_map.vlpi_lock);
1821 
1822     if (!its_dev->event_map.vm) {
1823         struct its_vlpi_map *maps;
1824 
1825         maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1826                    GFP_ATOMIC);
1827         if (!maps) {
1828             ret = -ENOMEM;
1829             goto out;
1830         }
1831 
1832         its_dev->event_map.vm = info->map->vm;
1833         its_dev->event_map.vlpi_maps = maps;
1834     } else if (its_dev->event_map.vm != info->map->vm) {
1835         ret = -EINVAL;
1836         goto out;
1837     }
1838 
1839     /* Get our private copy of the mapping information */
1840     its_dev->event_map.vlpi_maps[event] = *info->map;
1841 
1842     if (irqd_is_forwarded_to_vcpu(d)) {
1843         /* Already mapped, move it around */
1844         its_send_vmovi(its_dev, event);
1845     } else {
1846         /* Ensure all the VPEs are mapped on this ITS */
1847         its_map_vm(its_dev->its, info->map->vm);
1848 
1849         /*
1850          * Flag the interrupt as forwarded so that we can
1851          * start poking the virtual property table.
1852          */
1853         irqd_set_forwarded_to_vcpu(d);
1854 
1855         /* Write out the property to the prop table */
1856         lpi_write_config(d, 0xff, info->map->properties);
1857 
1858         /* Drop the physical mapping */
1859         its_send_discard(its_dev, event);
1860 
1861         /* and install the virtual one */
1862         its_send_vmapti(its_dev, event);
1863 
1864         /* Increment the number of VLPIs */
1865         its_dev->event_map.nr_vlpis++;
1866     }
1867 
1868 out:
1869     raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1870     return ret;
1871 }
1872 
1873 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1874 {
1875     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1876     struct its_vlpi_map *map;
1877     int ret = 0;
1878 
1879     raw_spin_lock(&its_dev->event_map.vlpi_lock);
1880 
1881     map = get_vlpi_map(d);
1882 
1883     if (!its_dev->event_map.vm || !map) {
1884         ret = -EINVAL;
1885         goto out;
1886     }
1887 
1888     /* Copy our mapping information to the incoming request */
1889     *info->map = *map;
1890 
1891 out:
1892     raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1893     return ret;
1894 }
1895 
1896 static int its_vlpi_unmap(struct irq_data *d)
1897 {
1898     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1899     u32 event = its_get_event_id(d);
1900     int ret = 0;
1901 
1902     raw_spin_lock(&its_dev->event_map.vlpi_lock);
1903 
1904     if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1905         ret = -EINVAL;
1906         goto out;
1907     }
1908 
1909     /* Drop the virtual mapping */
1910     its_send_discard(its_dev, event);
1911 
1912     /* and restore the physical one */
1913     irqd_clr_forwarded_to_vcpu(d);
1914     its_send_mapti(its_dev, d->hwirq, event);
1915     lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1916                     LPI_PROP_ENABLED |
1917                     LPI_PROP_GROUP1));
1918 
1919     /* Potentially unmap the VM from this ITS */
1920     its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1921 
1922     /*
1923      * Drop the refcount and make the device available again if
1924      * this was the last VLPI.
1925      */
1926     if (!--its_dev->event_map.nr_vlpis) {
1927         its_dev->event_map.vm = NULL;
1928         kfree(its_dev->event_map.vlpi_maps);
1929     }
1930 
1931 out:
1932     raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1933     return ret;
1934 }
1935 
1936 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1937 {
1938     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1939 
1940     if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1941         return -EINVAL;
1942 
1943     if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1944         lpi_update_config(d, 0xff, info->config);
1945     else
1946         lpi_write_config(d, 0xff, info->config);
1947     its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1948 
1949     return 0;
1950 }
1951 
1952 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1953 {
1954     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1955     struct its_cmd_info *info = vcpu_info;
1956 
1957     /* Need a v4 ITS */
1958     if (!is_v4(its_dev->its))
1959         return -EINVAL;
1960 
1961     /* Unmap request? */
1962     if (!info)
1963         return its_vlpi_unmap(d);
1964 
1965     switch (info->cmd_type) {
1966     case MAP_VLPI:
1967         return its_vlpi_map(d, info);
1968 
1969     case GET_VLPI:
1970         return its_vlpi_get(d, info);
1971 
1972     case PROP_UPDATE_VLPI:
1973     case PROP_UPDATE_AND_INV_VLPI:
1974         return its_vlpi_prop_update(d, info);
1975 
1976     default:
1977         return -EINVAL;
1978     }
1979 }
1980 
1981 static struct irq_chip its_irq_chip = {
1982     .name           = "ITS",
1983     .irq_mask       = its_mask_irq,
1984     .irq_unmask     = its_unmask_irq,
1985     .irq_eoi        = irq_chip_eoi_parent,
1986     .irq_set_affinity   = its_set_affinity,
1987     .irq_compose_msi_msg    = its_irq_compose_msi_msg,
1988     .irq_set_irqchip_state  = its_irq_set_irqchip_state,
1989     .irq_retrigger      = its_irq_retrigger,
1990     .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
1991 };
1992 
1993 
1994 /*
1995  * How we allocate LPIs:
1996  *
1997  * lpi_range_list contains ranges of LPIs that are available to
1998  * allocate from. To allocate LPIs, just pick the first range that
1999  * fits the required allocation, and reduce it by the required
2000  * amount. Once empty, remove the range from the list.
2001  *
2002  * To free a range of LPIs, add a free range to the list, sort it and
2003  * merge the result if the new range happens to be adjacent to an
2004  * already free block.
2005  *
2006  * The consequence of the above is that allocation cost is low, but
2007  * freeing is expensive. We assume that freeing rarely occurs.
2008  */
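
/*
 * Worked example of the scheme above (illustrative values): starting
 * from a single free range [8192, 8192 + 65536):
 *
 *   alloc_lpi_range(32, &base)  -> base = 8192, free list: [8224, ...)
 *   alloc_lpi_range(64, &base)  -> base = 8224, free list: [8288, ...)
 *   free_lpi_range(8192, 32)    -> free list: [8192, 8224) [8288, ...)
 *   free_lpi_range(8224, 64)    -> [8192, 8224) and [8224, 8288) are
 *                                  adjacent to their neighbours, so
 *                                  everything merges back to [8192, ...)
 */
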
2009 #define ITS_MAX_LPI_NRBITS  16 /* 64K LPIs */
2010 
2011 static DEFINE_MUTEX(lpi_range_lock);
2012 static LIST_HEAD(lpi_range_list);
2013 
2014 struct lpi_range {
2015     struct list_head    entry;
2016     u32         base_id;
2017     u32         span;
2018 };
2019 
2020 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2021 {
2022     struct lpi_range *range;
2023 
2024     range = kmalloc(sizeof(*range), GFP_KERNEL);
2025     if (range) {
2026         range->base_id = base;
2027         range->span = span;
2028     }
2029 
2030     return range;
2031 }
2032 
2033 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2034 {
2035     struct lpi_range *range, *tmp;
2036     int err = -ENOSPC;
2037 
2038     mutex_lock(&lpi_range_lock);
2039 
2040     list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2041         if (range->span >= nr_lpis) {
2042             *base = range->base_id;
2043             range->base_id += nr_lpis;
2044             range->span -= nr_lpis;
2045 
2046             if (range->span == 0) {
2047                 list_del(&range->entry);
2048                 kfree(range);
2049             }
2050 
2051             err = 0;
2052             break;
2053         }
2054     }
2055 
2056     mutex_unlock(&lpi_range_lock);
2057 
2058     pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2059     return err;
2060 }
2061 
2062 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2063 {
2064     if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2065         return;
2066     if (a->base_id + a->span != b->base_id)
2067         return;
2068     b->base_id = a->base_id;
2069     b->span += a->span;
2070     list_del(&a->entry);
2071     kfree(a);
2072 }
2073 
2074 static int free_lpi_range(u32 base, u32 nr_lpis)
2075 {
2076     struct lpi_range *new, *old;
2077 
2078     new = mk_lpi_range(base, nr_lpis);
2079     if (!new)
2080         return -ENOMEM;
2081 
2082     mutex_lock(&lpi_range_lock);
2083 
2084     list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2085         if (old->base_id < base)
2086             break;
2087     }
2088     /*
2089      * old is the last element with ->base_id smaller than base,
2090      * so new goes right after it. If there are no elements with
2091      * ->base_id smaller than base, &old->entry ends up pointing
2092      * at the head of the list, and inserting new at the start of
2093      * the list is the right thing to do in that case as well.
2094      */
2095     list_add(&new->entry, &old->entry);
2096     /*
2097      * Now check if we can merge with the preceding and/or
2098      * following ranges.
2099      */
2100     merge_lpi_ranges(old, new);
2101     merge_lpi_ranges(new, list_next_entry(new, entry));
2102 
2103     mutex_unlock(&lpi_range_lock);
2104     return 0;
2105 }
2106 
2107 static int __init its_lpi_init(u32 id_bits)
2108 {
2109     u32 lpis = (1UL << id_bits) - 8192;
2110     u32 numlpis;
2111     int err;
2112 
2113     numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2114 
2115     if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2116         lpis = numlpis;
2117         pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2118             lpis);
2119     }
2120 
2121     /*
2122      * Initializing the allocator is just the same as freeing the
2123      * full range of LPIs.
2124      */
2125     err = free_lpi_range(8192, lpis);
2126     pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2127     return err;
2128 }
2129 
2130 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2131 {
2132     unsigned long *bitmap = NULL;
2133     int err = 0;
2134 
2135     do {
2136         err = alloc_lpi_range(nr_irqs, base);
2137         if (!err)
2138             break;
2139 
2140         nr_irqs /= 2;
2141     } while (nr_irqs > 0);
2142 
2143     if (!nr_irqs)
2144         err = -ENOSPC;
2145 
2146     if (err)
2147         goto out;
2148 
2149     bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2150     if (!bitmap)
2151         goto out;
2152 
2153     *nr_ids = nr_irqs;
2154 
2155 out:
2156     if (!bitmap)
2157         *base = *nr_ids = 0;
2158 
2159     return bitmap;
2160 }
2161 
2162 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2163 {
2164     WARN_ON(free_lpi_range(base, nr_ids));
2165     bitmap_free(bitmap);
2166 }
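
/*
 * Usage sketch for the allocator pair above (illustrative only, not
 * called anywhere): a request that no single free range can satisfy is
 * transparently halved, so callers must size their use on the returned
 * nr_ids rather than on the count they asked for.
 */
static void __maybe_unused example_lpi_alloc_usage(void)
{
	unsigned long *map;
	int nr_ids;
	u32 base;

	/* May come back with 48, 24, ... IDs if 96 contiguous aren't free */
	map = its_lpi_alloc(96, &base, &nr_ids);
	if (!map)
		return;

	/* ... use LPIs [base, base + nr_ids) ... */

	its_lpi_free(map, base, nr_ids);
}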
2167 
2168 static void gic_reset_prop_table(void *va)
2169 {
2170     /* Priority 0xa0, Group-1, disabled */
2171     memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2172 
2173     /* Make sure the GIC will observe the written configuration */
2174     gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2175 }
2176 
2177 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2178 {
2179     struct page *prop_page;
2180 
2181     prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2182     if (!prop_page)
2183         return NULL;
2184 
2185     gic_reset_prop_table(page_address(prop_page));
2186 
2187     return prop_page;
2188 }
2189 
2190 static void its_free_prop_table(struct page *prop_page)
2191 {
2192     free_pages((unsigned long)page_address(prop_page),
2193            get_order(LPI_PROPBASE_SZ));
2194 }
2195 
2196 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2197 {
2198     phys_addr_t start, end, addr_end;
2199     u64 i;
2200 
2201     /*
2202      * We don't bother checking for a kdump kernel as by
2203      * construction, the LPI tables are outside this kernel's
2204      * memory map.
2205      */
2206     if (is_kdump_kernel())
2207         return true;
2208 
2209     addr_end = addr + size - 1;
2210 
2211     for_each_reserved_mem_range(i, &start, &end) {
2212         if (addr >= start && addr_end <= end)
2213             return true;
2214     }
2215 
2216     /* Not found, not a good sign... */
2217     pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2218         &addr, &addr_end);
2219     add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2220     return false;
2221 }
2222 
2223 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2224 {
2225     if (efi_enabled(EFI_CONFIG_TABLES))
2226         return efi_mem_reserve_persistent(addr, size);
2227 
2228     return 0;
2229 }
2230 
2231 static int __init its_setup_lpi_prop_table(void)
2232 {
2233     if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2234         u64 val;
2235 
2236         val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2237         lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2238 
2239         gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2240         gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2241                              LPI_PROPBASE_SZ,
2242                              MEMREMAP_WB);
2243         gic_reset_prop_table(gic_rdists->prop_table_va);
2244     } else {
2245         struct page *page;
2246 
2247         lpi_id_bits = min_t(u32,
2248                     GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2249                     ITS_MAX_LPI_NRBITS);
2250         page = its_allocate_prop_table(GFP_NOWAIT);
2251         if (!page) {
2252             pr_err("Failed to allocate PROPBASE\n");
2253             return -ENOMEM;
2254         }
2255 
2256         gic_rdists->prop_table_pa = page_to_phys(page);
2257         gic_rdists->prop_table_va = page_address(page);
2258         WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2259                       LPI_PROPBASE_SZ));
2260     }
2261 
2262     pr_info("GICv3: using LPI property table @%pa\n",
2263         &gic_rdists->prop_table_pa);
2264 
2265     return its_lpi_init(lpi_id_bits);
2266 }
2267 
2268 static const char *its_base_type_string[] = {
2269     [GITS_BASER_TYPE_DEVICE]    = "Devices",
2270     [GITS_BASER_TYPE_VCPU]      = "Virtual CPUs",
2271     [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2272     [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
2273     [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
2274     [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
2275     [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
2276 };
2277 
2278 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2279 {
2280     u32 idx = baser - its->tables;
2281 
2282     return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2283 }
2284 
2285 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2286                 u64 val)
2287 {
2288     u32 idx = baser - its->tables;
2289 
2290     gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2291     baser->val = its_read_baser(its, baser);
2292 }
2293 
2294 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2295                u64 cache, u64 shr, u32 order, bool indirect)
2296 {
2297     u64 val = its_read_baser(its, baser);
2298     u64 esz = GITS_BASER_ENTRY_SIZE(val);
2299     u64 type = GITS_BASER_TYPE(val);
2300     u64 baser_phys, tmp;
2301     u32 alloc_pages, psz;
2302     struct page *page;
2303     void *base;
2304 
2305     psz = baser->psz;
2306     alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2307     if (alloc_pages > GITS_BASER_PAGES_MAX) {
2308         pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2309             &its->phys_base, its_base_type_string[type],
2310             alloc_pages, GITS_BASER_PAGES_MAX);
2311         alloc_pages = GITS_BASER_PAGES_MAX;
2312         order = get_order(GITS_BASER_PAGES_MAX * psz);
2313     }
2314 
2315     page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2316     if (!page)
2317         return -ENOMEM;
2318 
2319     base = (void *)page_address(page);
2320     baser_phys = virt_to_phys(base);
2321 
2322     /* Check if the physical address of the memory is above 48bits */
2323     if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2324 
2325         /* 52bit PA is supported only when PageSize=64K */
2326         if (psz != SZ_64K) {
2327             pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2328             free_pages((unsigned long)base, order);
2329             return -ENXIO;
2330         }
2331 
2332         /* Convert 52bit PA to 48bit field */
2333         baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2334     }
2335 
2336 retry_baser:
2337     val = (baser_phys                    |
2338         (type << GITS_BASER_TYPE_SHIFT)          |
2339         ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)   |
2340         ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
2341         cache                        |
2342         shr                      |
2343         GITS_BASER_VALID);
2344 
2345     val |=  indirect ? GITS_BASER_INDIRECT : 0x0;
2346 
2347     switch (psz) {
2348     case SZ_4K:
2349         val |= GITS_BASER_PAGE_SIZE_4K;
2350         break;
2351     case SZ_16K:
2352         val |= GITS_BASER_PAGE_SIZE_16K;
2353         break;
2354     case SZ_64K:
2355         val |= GITS_BASER_PAGE_SIZE_64K;
2356         break;
2357     }
2358 
2359     its_write_baser(its, baser, val);
2360     tmp = baser->val;
2361 
2362     if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2363         /*
2364          * Shareability didn't stick. Just use
2365          * whatever the read reported, which is likely
2366          * to be the only thing this redistributor
2367          * supports. If that's zero, make it
2368          * non-cacheable as well.
2369          */
2370         shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2371         if (!shr) {
2372             cache = GITS_BASER_nC;
2373             gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2374         }
2375         goto retry_baser;
2376     }
2377 
2378     if (val != tmp) {
2379         pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2380                &its->phys_base, its_base_type_string[type],
2381                val, tmp);
2382         free_pages((unsigned long)base, order);
2383         return -ENXIO;
2384     }
2385 
2386     baser->order = order;
2387     baser->base = base;
2388     baser->psz = psz;
2389     tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2390 
2391     pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2392         &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2393         its_base_type_string[type],
2394         (unsigned long)virt_to_phys(base),
2395         indirect ? "indirect" : "flat", (int)esz,
2396         psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2397 
2398     return 0;
2399 }
2400 
2401 static bool its_parse_indirect_baser(struct its_node *its,
2402                      struct its_baser *baser,
2403                      u32 *order, u32 ids)
2404 {
2405     u64 tmp = its_read_baser(its, baser);
2406     u64 type = GITS_BASER_TYPE(tmp);
2407     u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2408     u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2409     u32 new_order = *order;
2410     u32 psz = baser->psz;
2411     bool indirect = false;
2412 
2413     /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2414     if ((esz << ids) > (psz * 2)) {
2415         /*
2416          * Find out whether hw supports a single or two-level table
2417          * by reading bit at offset '62' after writing '1' to it.
2418          */
2419         its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2420         indirect = !!(baser->val & GITS_BASER_INDIRECT);
2421 
2422         if (indirect) {
2423             /*
2424              * The size of a lvl2 table is equal to the ITS page size,
2425              * 'psz'. To size the lvl1 table, subtract the number of ID
2426              * bits covered by a single lvl2 table from the 'ids'
2427              * reported by the ITS hardware, and allocate one lvl1
2428              * entry per remaining ID value.
2429              */
2430             ids -= ilog2(psz / (int)esz);
2431             esz = GITS_LVL1_ENTRY_SIZE;
2432         }
2433     }
2434 
2435     /*
2436      * Allocate as many entries as required to fit the
2437      * range of device IDs that the ITS can grok... The ID
2438      * space being incredibly sparse, this results in a
2439      * massive waste of memory if the two-level device table
2440      * feature is not supported by the hardware.
2441      */
2442     new_order = max_t(u32, get_order(esz << ids), new_order);
2443     if (new_order >= MAX_ORDER) {
2444         new_order = MAX_ORDER - 1;
2445         ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2446         pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2447             &its->phys_base, its_base_type_string[type],
2448             device_ids(its), ids);
2449     }
2450 
2451     *order = new_order;
2452 
2453     return indirect;
2454 }
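
/*
 * Worked example for the indirection math above (illustrative numbers):
 * with an 8-byte entry size, ids = 20 (1M device IDs) and psz = 64K, a
 * flat table would need esz << ids = 8MB. With indirection, one 64K
 * lvl2 page holds psz / esz = 8192 entries (13 bits worth of IDs), so
 * the lvl1 table only needs ids - 13 = 7 bits of 8-byte descriptors,
 * i.e. 128 * GITS_LVL1_ENTRY_SIZE = 1KB, with lvl2 pages allocated on
 * demand in its_alloc_table_entry().
 */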
2455 
2456 static u32 compute_common_aff(u64 val)
2457 {
2458     u32 aff, clpiaff;
2459 
2460     aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2461     clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2462 
2463     return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2464 }
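
/*
 * Worked example (illustrative): the affinity is packed as
 * Aff3.Aff2.Aff1.Aff0, one byte each. CommonLPIAff = 0 keeps none of
 * the bits (all RDs share a common LPI configuration), 1 keeps Aff3,
 * 2 keeps Aff3.Aff2 and 3 keeps Aff3.Aff2.Aff1. For aff = 0x01020304
 * and clpiaff = 2, the mask is ~0x0000ffff and the result 0x01020000.
 */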
2465 
2466 static u32 compute_its_aff(struct its_node *its)
2467 {
2468     u64 val;
2469     u32 svpet;
2470 
2471     /*
2472      * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2473      * the resulting affinity. We then use that to see if this matches
2474      * our own affinity.
2475      */
2476     svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2477     val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2478     val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2479     return compute_common_aff(val);
2480 }
2481 
2482 static struct its_node *find_sibling_its(struct its_node *cur_its)
2483 {
2484     struct its_node *its;
2485     u32 aff;
2486 
2487     if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2488         return NULL;
2489 
2490     aff = compute_its_aff(cur_its);
2491 
2492     list_for_each_entry(its, &its_nodes, entry) {
2493         u64 baser;
2494 
2495         if (!is_v4_1(its) || its == cur_its)
2496             continue;
2497 
2498         if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2499             continue;
2500 
2501         if (aff != compute_its_aff(its))
2502             continue;
2503 
2504         /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2505         baser = its->tables[2].val;
2506         if (!(baser & GITS_BASER_VALID))
2507             continue;
2508 
2509         return its;
2510     }
2511 
2512     return NULL;
2513 }
2514 
2515 static void its_free_tables(struct its_node *its)
2516 {
2517     int i;
2518 
2519     for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2520         if (its->tables[i].base) {
2521             free_pages((unsigned long)its->tables[i].base,
2522                    its->tables[i].order);
2523             its->tables[i].base = NULL;
2524         }
2525     }
2526 }
2527 
2528 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2529 {
2530     u64 psz = SZ_64K;
2531 
2532     while (psz) {
2533         u64 val, gpsz;
2534 
2535         val = its_read_baser(its, baser);
2536         val &= ~GITS_BASER_PAGE_SIZE_MASK;
2537 
2538         switch (psz) {
2539         case SZ_64K:
2540             gpsz = GITS_BASER_PAGE_SIZE_64K;
2541             break;
2542         case SZ_16K:
2543             gpsz = GITS_BASER_PAGE_SIZE_16K;
2544             break;
2545         case SZ_4K:
2546         default:
2547             gpsz = GITS_BASER_PAGE_SIZE_4K;
2548             break;
2549         }
2550 
2551         gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2552 
2553         val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2554         its_write_baser(its, baser, val);
2555 
2556         if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2557             break;
2558 
2559         switch (psz) {
2560         case SZ_64K:
2561             psz = SZ_16K;
2562             break;
2563         case SZ_16K:
2564             psz = SZ_4K;
2565             break;
2566         case SZ_4K:
2567         default:
2568             return -1;
2569         }
2570     }
2571 
2572     baser->psz = psz;
2573     return 0;
2574 }
2575 
2576 static int its_alloc_tables(struct its_node *its)
2577 {
2578     u64 shr = GITS_BASER_InnerShareable;
2579     u64 cache = GITS_BASER_RaWaWb;
2580     int err, i;
2581 
2582     if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2583         /* erratum 24313: ignore memory access type */
2584         cache = GITS_BASER_nCnB;
2585 
2586     for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2587         struct its_baser *baser = its->tables + i;
2588         u64 val = its_read_baser(its, baser);
2589         u64 type = GITS_BASER_TYPE(val);
2590         bool indirect = false;
2591         u32 order;
2592 
2593         if (type == GITS_BASER_TYPE_NONE)
2594             continue;
2595 
2596         if (its_probe_baser_psz(its, baser)) {
2597             its_free_tables(its);
2598             return -ENXIO;
2599         }
2600 
2601         order = get_order(baser->psz);
2602 
2603         switch (type) {
2604         case GITS_BASER_TYPE_DEVICE:
2605             indirect = its_parse_indirect_baser(its, baser, &order,
2606                                 device_ids(its));
2607             break;
2608 
2609         case GITS_BASER_TYPE_VCPU:
2610             if (is_v4_1(its)) {
2611                 struct its_node *sibling;
2612 
2613                 WARN_ON(i != 2);
2614                 if ((sibling = find_sibling_its(its))) {
2615                     *baser = sibling->tables[2];
2616                     its_write_baser(its, baser, baser->val);
2617                     continue;
2618                 }
2619             }
2620 
2621             indirect = its_parse_indirect_baser(its, baser, &order,
2622                                 ITS_MAX_VPEID_BITS);
2623             break;
2624         }
2625 
2626         err = its_setup_baser(its, baser, cache, shr, order, indirect);
2627         if (err < 0) {
2628             its_free_tables(its);
2629             return err;
2630         }
2631 
2632         /* Update settings which will be used for next BASERn */
2633         cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2634         shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2635     }
2636 
2637     return 0;
2638 }
2639 
2640 static u64 inherit_vpe_l1_table_from_its(void)
2641 {
2642     struct its_node *its;
2643     u64 val;
2644     u32 aff;
2645 
2646     val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2647     aff = compute_common_aff(val);
2648 
2649     list_for_each_entry(its, &its_nodes, entry) {
2650         u64 baser, addr;
2651 
2652         if (!is_v4_1(its))
2653             continue;
2654 
2655         if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2656             continue;
2657 
2658         if (aff != compute_its_aff(its))
2659             continue;
2660 
2661         /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2662         baser = its->tables[2].val;
2663         if (!(baser & GITS_BASER_VALID))
2664             continue;
2665 
2666         /* We have a winner! */
2667         gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2668 
2669         val  = GICR_VPROPBASER_4_1_VALID;
2670         if (baser & GITS_BASER_INDIRECT)
2671             val |= GICR_VPROPBASER_4_1_INDIRECT;
2672         val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2673                   FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2674         switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2675         case GIC_PAGE_SIZE_64K:
2676             addr = GITS_BASER_ADDR_48_to_52(baser);
2677             break;
2678         default:
2679             addr = baser & GENMASK_ULL(47, 12);
2680             break;
2681         }
2682         val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2683         val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2684                   FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2685         val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2686                   FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2687         val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2688 
2689         return val;
2690     }
2691 
2692     return 0;
2693 }
2694 
2695 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2696 {
2697     u32 aff;
2698     u64 val;
2699     int cpu;
2700 
2701     val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2702     aff = compute_common_aff(val);
2703 
2704     for_each_possible_cpu(cpu) {
2705         void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2706 
2707         if (!base || cpu == smp_processor_id())
2708             continue;
2709 
2710         val = gic_read_typer(base + GICR_TYPER);
2711         if (aff != compute_common_aff(val))
2712             continue;
2713 
2714         /*
2715          * At this point, we have a victim. This particular CPU
2716          * has already booted, and has an affinity that matches
2717          * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2718          * Make sure we don't write the Z bit in that case.
2719          */
2720         val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2721         val &= ~GICR_VPROPBASER_4_1_Z;
2722 
2723         gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2724         *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2725 
2726         return val;
2727     }
2728 
2729     return 0;
2730 }
2731 
2732 static bool allocate_vpe_l2_table(int cpu, u32 id)
2733 {
2734     void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2735     unsigned int psz, esz, idx, npg, gpsz;
2736     u64 val;
2737     struct page *page;
2738     __le64 *table;
2739 
2740     if (!gic_rdists->has_rvpeid)
2741         return true;
2742 
2743     /* Skip non-present CPUs */
2744     if (!base)
2745         return true;
2746 
2747     val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2748 
2749     esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2750     gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2751     npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2752 
2753     switch (gpsz) {
2754     default:
2755         WARN_ON(1);
2756         fallthrough;
2757     case GIC_PAGE_SIZE_4K:
2758         psz = SZ_4K;
2759         break;
2760     case GIC_PAGE_SIZE_16K:
2761         psz = SZ_16K;
2762         break;
2763     case GIC_PAGE_SIZE_64K:
2764         psz = SZ_64K;
2765         break;
2766     }
2767 
2768     /* Don't allow vpe_id that exceeds single, flat table limit */
2769     if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2770         return (id < (npg * psz / (esz * SZ_8)));
2771 
2772     /* Compute 1st level table index & check if that exceeds table limit */
2773     idx = id >> ilog2(psz / (esz * SZ_8));
2774     if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2775         return false;
2776 
2777     table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2778 
2779     /* Allocate memory for 2nd level table */
2780     if (!table[idx]) {
2781         page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2782         if (!page)
2783             return false;
2784 
2785         /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2786         if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2787             gic_flush_dcache_to_poc(page_address(page), psz);
2788 
2789         table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2790 
2791         /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2792         if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2793             gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2794 
2795         /* Ensure updated table contents are visible to RD hardware */
2796         dsb(sy);
2797     }
2798 
2799     return true;
2800 }
2801 
2802 static int allocate_vpe_l1_table(void)
2803 {
2804     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2805     u64 val, gpsz, npg, pa;
2806     unsigned int psz = SZ_64K;
2807     unsigned int np, epp, esz;
2808     struct page *page;
2809 
2810     if (!gic_rdists->has_rvpeid)
2811         return 0;
2812 
2813     /*
2814      * if VPENDBASER.Valid is set, disable any previously programmed
2815      * VPE by setting PendingLast while clearing Valid. This has the
2816      * effect of making sure no doorbell will be generated and we can
2817      * then safely clear VPROPBASER.Valid.
2818      */
2819     if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2820         gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2821                       vlpi_base + GICR_VPENDBASER);
2822 
2823     /*
2824      * If we can inherit the configuration from another RD, let's do
2825      * so. Otherwise, we have to go through the allocation process. We
2826      * assume that all RDs have the exact same requirements, as
2827      * nothing will work otherwise.
2828      */
2829     val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2830     if (val & GICR_VPROPBASER_4_1_VALID)
2831         goto out;
2832 
2833     gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2834     if (!gic_data_rdist()->vpe_table_mask)
2835         return -ENOMEM;
2836 
2837     val = inherit_vpe_l1_table_from_its();
2838     if (val & GICR_VPROPBASER_4_1_VALID)
2839         goto out;
2840 
2841     /* First probe the page size */
2842     val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2843     gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2844     val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2845     gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2846     esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2847 
2848     switch (gpsz) {
2849     default:
2850         gpsz = GIC_PAGE_SIZE_4K;
2851         fallthrough;
2852     case GIC_PAGE_SIZE_4K:
2853         psz = SZ_4K;
2854         break;
2855     case GIC_PAGE_SIZE_16K:
2856         psz = SZ_16K;
2857         break;
2858     case GIC_PAGE_SIZE_64K:
2859         psz = SZ_64K;
2860         break;
2861     }
2862 
2863     /*
2864      * Start populating the register from scratch, including RO fields
2865      * (which we want to print in debug cases...)
2866      */
2867     val = 0;
2868     val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2869     val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2870 
2871     /* How many entries per GIC page? */
2872     esz++;
2873     epp = psz / (esz * SZ_8);
2874 
2875     /*
2876      * If we need more than just a single L1 page, flag the table
2877      * as indirect and compute the number of required L1 pages.
2878      */
2879     if (epp < ITS_MAX_VPEID) {
2880         int nl2;
2881 
2882         val |= GICR_VPROPBASER_4_1_INDIRECT;
2883 
2884         /* Number of L2 pages required to cover the VPEID space */
2885         nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2886 
2887         /* Number of L1 pages to point to the L2 pages */
2888         npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2889     } else {
2890         npg = 1;
2891     }
2892 
2893     val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2894 
2895     /* Right, that's the number of CPU pages we need for L1 */
2896     np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2897 
2898     pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2899          np, npg, psz, epp, esz);
2900     page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2901     if (!page)
2902         return -ENOMEM;
2903 
2904     gic_data_rdist()->vpe_l1_base = page_address(page);
2905     pa = virt_to_phys(page_address(page));
2906     WARN_ON(!IS_ALIGNED(pa, psz));
2907 
2908     val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2909     val |= GICR_VPROPBASER_RaWb;
2910     val |= GICR_VPROPBASER_InnerShareable;
2911     val |= GICR_VPROPBASER_4_1_Z;
2912     val |= GICR_VPROPBASER_4_1_VALID;
2913 
2914 out:
2915     gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2916     cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2917 
2918     pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2919          smp_processor_id(), val,
2920          cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2921 
2922     return 0;
2923 }
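
/*
 * Worked example for the sizing above (illustrative, assuming
 * ITS_MAX_VPEID is 64K and 4K kernel pages): with psz = 64K and an
 * 8-byte vPE table entry, epp = 65536 / 8 = 8192 entries per GIC page,
 * which is less than ITS_MAX_VPEID, so the table is indirect:
 * nl2 = 65536 / 8192 = 8 L2 pages, npg = DIV_ROUND_UP(8 * 8, 65536) = 1
 * L1 page, and np = DIV_ROUND_UP(1 * 64K, 4K) = 16 kernel pages are
 * allocated for it.
 */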
2924 
2925 static int its_alloc_collections(struct its_node *its)
2926 {
2927     int i;
2928 
2929     its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2930                    GFP_KERNEL);
2931     if (!its->collections)
2932         return -ENOMEM;
2933 
2934     for (i = 0; i < nr_cpu_ids; i++)
2935         its->collections[i].target_address = ~0ULL;
2936 
2937     return 0;
2938 }
2939 
2940 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2941 {
2942     struct page *pend_page;
2943 
2944     pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2945                 get_order(LPI_PENDBASE_SZ));
2946     if (!pend_page)
2947         return NULL;
2948 
2949     /* Make sure the GIC will observe the zero-ed page */
2950     gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2951 
2952     return pend_page;
2953 }
2954 
2955 static void its_free_pending_table(struct page *pt)
2956 {
2957     free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2958 }
2959 
2960 /*
2961  * Booting with kdump and LPIs enabled is generally fine. Any other
2962  * case is wrong in the absence of firmware/EFI support.
2963  */
2964 static bool enabled_lpis_allowed(void)
2965 {
2966     phys_addr_t addr;
2967     u64 val;
2968 
2969     /* Check whether the property table is in a reserved region */
2970     val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2971     addr = val & GENMASK_ULL(51, 12);
2972 
2973     return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2974 }
2975 
2976 static int __init allocate_lpi_tables(void)
2977 {
2978     u64 val;
2979     int err, cpu;
2980 
2981     /*
2982      * If LPIs are enabled while we run this from the boot CPU,
2983      * flag the RD tables as pre-allocated if the stars do align.
2984      */
2985     val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2986     if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2987         gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2988                       RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2989         pr_info("GICv3: Using preallocated redistributor tables\n");
2990     }
2991 
2992     err = its_setup_lpi_prop_table();
2993     if (err)
2994         return err;
2995 
2996     /*
2997      * We allocate all the pending tables anyway, as we may have a
2998      * mix of RDs that have had LPIs enabled, and some that
2999      * haven't. We'll free the unused ones as each CPU comes online.
3000      */
3001     for_each_possible_cpu(cpu) {
3002         struct page *pend_page;
3003 
3004         pend_page = its_allocate_pending_table(GFP_NOWAIT);
3005         if (!pend_page) {
3006             pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3007             return -ENOMEM;
3008         }
3009 
3010         gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3011     }
3012 
3013     return 0;
3014 }
3015 
3016 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3017 {
3018     u32 count = 1000000;    /* 1s! */
3019     bool clean;
3020     u64 val;
3021 
3022     do {
3023         val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3024         clean = !(val & GICR_VPENDBASER_Dirty);
3025         if (!clean) {
3026             count--;
3027             cpu_relax();
3028             udelay(1);
3029         }
3030     } while (!clean && count);
3031 
3032     if (unlikely(!clean))
3033         pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3034 
3035     return val;
3036 }
3037 
3038 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3039 {
3040     u64 val;
3041 
3042     /* Make sure we wait until the RD is done with the initial scan */
3043     val = read_vpend_dirty_clear(vlpi_base);
3044     val &= ~GICR_VPENDBASER_Valid;
3045     val &= ~clr;
3046     val |= set;
3047     gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3048 
3049     val = read_vpend_dirty_clear(vlpi_base);
3050     if (unlikely(val & GICR_VPENDBASER_Dirty))
3051         val |= GICR_VPENDBASER_PendingLast;
3052 
3053     return val;
3054 }
3055 
3056 static void its_cpu_init_lpis(void)
3057 {
3058     void __iomem *rbase = gic_data_rdist_rd_base();
3059     struct page *pend_page;
3060     phys_addr_t paddr;
3061     u64 val, tmp;
3062 
3063     if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3064         return;
3065 
3066     val = readl_relaxed(rbase + GICR_CTLR);
3067     if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3068         (val & GICR_CTLR_ENABLE_LPIS)) {
3069         /*
3070          * Check that we get the same property table on all
3071          * RDs. If we don't, this is hopeless.
3072          */
3073         paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3074         paddr &= GENMASK_ULL(51, 12);
3075         if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3076             add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3077 
3078         paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3079         paddr &= GENMASK_ULL(51, 16);
3080 
3081         WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3082         gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3083 
3084         goto out;
3085     }
3086 
3087     pend_page = gic_data_rdist()->pend_page;
3088     paddr = page_to_phys(pend_page);
3089 
3090     /* set PROPBASE */
3091     val = (gic_rdists->prop_table_pa |
3092            GICR_PROPBASER_InnerShareable |
3093            GICR_PROPBASER_RaWaWb |
3094            ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3095 
3096     gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3097     tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3098 
3099     if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3100         if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3101             /*
3102              * The HW reports non-shareable, we must
3103              * remove the cacheability attributes as
3104              * well.
3105              */
3106             val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3107                  GICR_PROPBASER_CACHEABILITY_MASK);
3108             val |= GICR_PROPBASER_nC;
3109             gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3110         }
3111         pr_info_once("GIC: using cache flushing for LPI property table\n");
3112         gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3113     }
3114 
3115     /* set PENDBASE */
3116     val = (page_to_phys(pend_page) |
3117            GICR_PENDBASER_InnerShareable |
3118            GICR_PENDBASER_RaWaWb);
3119 
3120     gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3121     tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3122 
3123     if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3124         /*
3125          * The HW reports non-shareable, we must remove the
3126          * cacheability attributes as well.
3127          */
3128         val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3129              GICR_PENDBASER_CACHEABILITY_MASK);
3130         val |= GICR_PENDBASER_nC;
3131         gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3132     }
3133 
3134     /* Enable LPIs */
3135     val = readl_relaxed(rbase + GICR_CTLR);
3136     val |= GICR_CTLR_ENABLE_LPIS;
3137     writel_relaxed(val, rbase + GICR_CTLR);
3138 
3139     if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3140         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3141 
3142         /*
3143          * It's possible for a CPU to receive VLPIs before it is
3144          * scheduled as a vPE, especially for the first CPU, and a
3145          * VLPI with an INTID larger than 2^(IDbits+1) will be considered
3146          * out of range and dropped by the GIC.
3147          * So we initialize IDbits to a known value to avoid VLPIs being dropped.
3148          */
3149         val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3150         pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3151             smp_processor_id(), val);
3152         gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3153 
3154         /*
3155          * Also clear the Valid bit of GICR_VPENDBASER, in case some
3156          * ancient programming was left in place and could end up
3157          * corrupting memory.
3158          */
3159         val = its_clear_vpend_valid(vlpi_base, 0, 0);
3160     }
3161 
3162     if (allocate_vpe_l1_table()) {
3163         /*
3164          * If the allocation has failed, we're in massive trouble.
3165          * Disable direct injection, and pray that no VM was
3166          * already running...
3167          */
3168         gic_rdists->has_rvpeid = false;
3169         gic_rdists->has_vlpis = false;
3170     }
3171 
3172     /* Make sure the GIC has seen the above */
3173     dsb(sy);
3174 out:
3175     gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3176     pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3177         smp_processor_id(),
3178         gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3179         "reserved" : "allocated",
3180         &paddr);
3181 }
3182 
3183 static void its_cpu_init_collection(struct its_node *its)
3184 {
3185     int cpu = smp_processor_id();
3186     u64 target;
3187 
3188     /* avoid cross-node collections and their mapping */
3189     if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3190         struct device_node *cpu_node;
3191 
3192         cpu_node = of_get_cpu_node(cpu, NULL);
3193         if (its->numa_node != NUMA_NO_NODE &&
3194             its->numa_node != of_node_to_nid(cpu_node))
3195             return;
3196     }
3197 
3198     /*
3199      * We now have to bind each collection to its target
3200      * redistributor.
3201      */
3202     if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3203         /*
3204          * This ITS wants the physical address of the
3205          * redistributor.
3206          */
3207         target = gic_data_rdist()->phys_base;
3208     } else {
3209         /* This ITS wants a linear CPU number. */
3210         target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3211         target = GICR_TYPER_CPU_NUMBER(target) << 16;
3212     }
3213 
3214     /* Perform collection mapping */
3215     its->collections[cpu].target_address = target;
3216     its->collections[cpu].col_id = cpu;
3217 
3218     its_send_mapc(its, &its->collections[cpu], 1);
3219     its_send_invall(its, &its->collections[cpu]);
3220 }
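
/*
 * Illustrative example of the two target encodings above (made-up
 * values): with GITS_TYPER.PTA set, a redistributor whose frame lives
 * at physical address 0x2f100000 is used as the collection target
 * directly. Without PTA, a redistributor reporting processor number 3
 * in GICR_TYPER is encoded as 3 << 16 = 0x30000, which is the RDbase
 * form the MAPC command expects in that mode.
 */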
3221 
3222 static void its_cpu_init_collections(void)
3223 {
3224     struct its_node *its;
3225 
3226     raw_spin_lock(&its_lock);
3227 
3228     list_for_each_entry(its, &its_nodes, entry)
3229         its_cpu_init_collection(its);
3230 
3231     raw_spin_unlock(&its_lock);
3232 }
3233 
3234 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3235 {
3236     struct its_device *its_dev = NULL, *tmp;
3237     unsigned long flags;
3238 
3239     raw_spin_lock_irqsave(&its->lock, flags);
3240 
3241     list_for_each_entry(tmp, &its->its_device_list, entry) {
3242         if (tmp->device_id == dev_id) {
3243             its_dev = tmp;
3244             break;
3245         }
3246     }
3247 
3248     raw_spin_unlock_irqrestore(&its->lock, flags);
3249 
3250     return its_dev;
3251 }
3252 
3253 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3254 {
3255     int i;
3256 
3257     for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3258         if (GITS_BASER_TYPE(its->tables[i].val) == type)
3259             return &its->tables[i];
3260     }
3261 
3262     return NULL;
3263 }
3264 
3265 static bool its_alloc_table_entry(struct its_node *its,
3266                   struct its_baser *baser, u32 id)
3267 {
3268     struct page *page;
3269     u32 esz, idx;
3270     __le64 *table;
3271 
3272     /* Don't allow device id that exceeds single, flat table limit */
3273     esz = GITS_BASER_ENTRY_SIZE(baser->val);
3274     if (!(baser->val & GITS_BASER_INDIRECT))
3275         return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3276 
3277     /* Compute 1st level table index & check if that exceeds table limit */
3278     idx = id >> ilog2(baser->psz / esz);
3279     if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3280         return false;
3281 
3282     table = baser->base;
3283 
3284     /* Allocate memory for 2nd level table */
3285     if (!table[idx]) {
3286         page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3287                     get_order(baser->psz));
3288         if (!page)
3289             return false;
3290 
3291         /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3292         if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3293             gic_flush_dcache_to_poc(page_address(page), baser->psz);
3294 
3295         table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3296 
3297         /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3298         if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3299             gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3300 
3301         /* Ensure updated table contents are visible to ITS hardware */
3302         dsb(sy);
3303     }
3304 
3305     return true;
3306 }
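
/*
 * Minimal sketch (not used by the driver) of the ID split performed
 * above, assuming an 8-byte table entry and a 64K ITS page: a lvl2
 * page then covers 8192 IDs (13 bits), so the low 13 bits of the ID
 * index into the lvl2 page and the remaining bits select the lvl1
 * descriptor, e.g. example_lvl1_index(0x12345, SZ_64K, 8) == 9.
 */
static inline u32 example_lvl1_index(u32 id, u32 psz, u32 esz)
{
	return id >> ilog2(psz / esz);
}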
3307 
3308 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3309 {
3310     struct its_baser *baser;
3311 
3312     baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3313 
3314     /* Don't allow device id that exceeds ITS hardware limit */
3315     if (!baser)
3316         return (ilog2(dev_id) < device_ids(its));
3317 
3318     return its_alloc_table_entry(its, baser, dev_id);
3319 }
3320 
3321 static bool its_alloc_vpe_table(u32 vpe_id)
3322 {
3323     struct its_node *its;
3324     int cpu;
3325 
3326     /*
3327      * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3328      * could try and only do it on ITSs corresponding to devices
3329      * that have interrupts targeted at this VPE, but the
3330      * complexity becomes crazy (and you have tons of memory
3331      * anyway, right?).
3332      */
3333     list_for_each_entry(its, &its_nodes, entry) {
3334         struct its_baser *baser;
3335 
3336         if (!is_v4(its))
3337             continue;
3338 
3339         baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3340         if (!baser)
3341             return false;
3342 
3343         if (!its_alloc_table_entry(its, baser, vpe_id))
3344             return false;
3345     }
3346 
3347     /* Non v4.1? No need to iterate RDs; return early. */
3348     if (!gic_rdists->has_rvpeid)
3349         return true;
3350 
3351     /*
3352      * Make sure the L2 tables are allocated for all copies of
3353      * the L1 table on *all* v4.1 RDs.
3354      */
3355     for_each_possible_cpu(cpu) {
3356         if (!allocate_vpe_l2_table(cpu, vpe_id))
3357             return false;
3358     }
3359 
3360     return true;
3361 }
3362 
3363 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3364                         int nvecs, bool alloc_lpis)
3365 {
3366     struct its_device *dev;
3367     unsigned long *lpi_map = NULL;
3368     unsigned long flags;
3369     u16 *col_map = NULL;
3370     void *itt;
3371     int lpi_base;
3372     int nr_lpis;
3373     int nr_ites;
3374     int sz;
3375 
3376     if (!its_alloc_device_table(its, dev_id))
3377         return NULL;
3378 
3379     if (WARN_ON(!is_power_of_2(nvecs)))
3380         nvecs = roundup_pow_of_two(nvecs);
3381 
3382     dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3383     /*
3384      * Even if the device wants a single LPI, the ITT must be
3385      * sized as a power of two (and you need at least one bit...).
3386      */
3387     nr_ites = max(2, nvecs);
3388     sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3389     sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3390     itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3391     if (alloc_lpis) {
3392         lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3393         if (lpi_map)
3394             col_map = kcalloc(nr_lpis, sizeof(*col_map),
3395                       GFP_KERNEL);
3396     } else {
3397         col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3398         nr_lpis = 0;
3399         lpi_base = 0;
3400     }
3401 
3402     if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
3403         kfree(dev);
3404         kfree(itt);
3405         bitmap_free(lpi_map);
3406         kfree(col_map);
3407         return NULL;
3408     }
3409 
3410     gic_flush_dcache_to_poc(itt, sz);
3411 
3412     dev->its = its;
3413     dev->itt = itt;
3414     dev->nr_ites = nr_ites;
3415     dev->event_map.lpi_map = lpi_map;
3416     dev->event_map.col_map = col_map;
3417     dev->event_map.lpi_base = lpi_base;
3418     dev->event_map.nr_lpis = nr_lpis;
3419     raw_spin_lock_init(&dev->event_map.vlpi_lock);
3420     dev->device_id = dev_id;
3421     INIT_LIST_HEAD(&dev->entry);
3422 
3423     raw_spin_lock_irqsave(&its->lock, flags);
3424     list_add(&dev->entry, &its->its_device_list);
3425     raw_spin_unlock_irqrestore(&its->lock, flags);
3426 
3427     /* Map device to its ITT */
3428     its_send_mapd(dev, 1);
3429 
3430     return dev;
3431 }
3432 
3433 static void its_free_device(struct its_device *its_dev)
3434 {
3435     unsigned long flags;
3436 
3437     raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3438     list_del(&its_dev->entry);
3439     raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3440     kfree(its_dev->event_map.col_map);
3441     kfree(its_dev->itt);
3442     kfree(its_dev);
3443 }
3444 
3445 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3446 {
3447     int idx;
3448 
3449     /* Find a free region of LPIs in lpi_map and allocate it. */
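    /*
     * Note that the region is power-of-two sized: get_count_order()
     * rounds nvecs up, so for example a request for 3 vectors reserves
     * an aligned 4-event region.
     */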
3450     idx = bitmap_find_free_region(dev->event_map.lpi_map,
3451                       dev->event_map.nr_lpis,
3452                       get_count_order(nvecs));
3453     if (idx < 0)
3454         return -ENOSPC;
3455 
3456     *hwirq = dev->event_map.lpi_base + idx;
3457 
3458     return 0;
3459 }
3460 
3461 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3462                int nvec, msi_alloc_info_t *info)
3463 {
3464     struct its_node *its;
3465     struct its_device *its_dev;
3466     struct msi_domain_info *msi_info;
3467     u32 dev_id;
3468     int err = 0;
3469 
3470     /*
3471      * We ignore "dev" entirely, and rely on the dev_id that has
3472      * been passed via the scratchpad. This limits this domain's
3473      * usefulness to upper layers that definitely know that they
3474      * are built on top of the ITS.
3475      */
3476     dev_id = info->scratchpad[0].ul;
3477 
3478     msi_info = msi_get_domain_info(domain);
3479     its = msi_info->data;
3480 
3481     if (!gic_rdists->has_direct_lpi &&
3482         vpe_proxy.dev &&
3483         vpe_proxy.dev->its == its &&
3484         dev_id == vpe_proxy.dev->device_id) {
3485         /* Bad luck. Get yourself a better implementation */
3486         WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3487               dev_id);
3488         return -EINVAL;
3489     }
3490 
3491     mutex_lock(&its->dev_alloc_lock);
3492     its_dev = its_find_device(its, dev_id);
3493     if (its_dev) {
3494         /*
3495          * We have already seen this ID, probably through
3496          * another alias (PCI bridge of some sort). No need to
3497          * create the device.
3498          */
3499         its_dev->shared = true;
3500         pr_debug("Reusing ITT for devID %x\n", dev_id);
3501         goto out;
3502     }
3503 
3504     its_dev = its_create_device(its, dev_id, nvec, true);
3505     if (!its_dev) {
3506         err = -ENOMEM;
3507         goto out;
3508     }
3509 
3510     if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3511         its_dev->shared = true;
3512 
3513     pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3514 out:
3515     mutex_unlock(&its->dev_alloc_lock);
3516     info->scratchpad[0].ptr = its_dev;
3517     return err;
3518 }
3519 
3520 static struct msi_domain_ops its_msi_domain_ops = {
3521     .msi_prepare    = its_msi_prepare,
3522 };
3523 
3524 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3525                     unsigned int virq,
3526                     irq_hw_number_t hwirq)
3527 {
3528     struct irq_fwspec fwspec;
3529 
3530     if (irq_domain_get_of_node(domain->parent)) {
3531         fwspec.fwnode = domain->parent->fwnode;
3532         fwspec.param_count = 3;
3533         fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3534         fwspec.param[1] = hwirq;
3535         fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3536     } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3537         fwspec.fwnode = domain->parent->fwnode;
3538         fwspec.param_count = 2;
3539         fwspec.param[0] = hwirq;
3540         fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3541     } else {
3542         return -EINVAL;
3543     }
3544 
3545     return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3546 }
3547 
3548 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3549                 unsigned int nr_irqs, void *args)
3550 {
3551     msi_alloc_info_t *info = args;
3552     struct its_device *its_dev = info->scratchpad[0].ptr;
3553     struct its_node *its = its_dev->its;
3554     struct irq_data *irqd;
3555     irq_hw_number_t hwirq;
3556     int err;
3557     int i;
3558 
3559     err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3560     if (err)
3561         return err;
3562 
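    /*
     * Make sure the doorbell address returned by get_msi_base()
     * (GITS_TRANSLATER in the normal case, or the pre-ITS window on
     * quirked hardware) is mapped in the device's IOMMU domain, so
     * that its MSI writes can actually reach the ITS.
     */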
3563     err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3564     if (err)
3565         return err;
3566 
3567     for (i = 0; i < nr_irqs; i++) {
3568         err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3569         if (err)
3570             return err;
3571 
3572         irq_domain_set_hwirq_and_chip(domain, virq + i,
3573                           hwirq + i, &its_irq_chip, its_dev);
3574         irqd = irq_get_irq_data(virq + i);
3575         irqd_set_single_target(irqd);
3576         irqd_set_affinity_on_activate(irqd);
3577         pr_debug("ID:%d pID:%d vID:%d\n",
3578              (int)(hwirq + i - its_dev->event_map.lpi_base),
3579              (int)(hwirq + i), virq + i);
3580     }
3581 
3582     return 0;
3583 }
3584 
3585 static int its_irq_domain_activate(struct irq_domain *domain,
3586                    struct irq_data *d, bool reserve)
3587 {
3588     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3589     u32 event = its_get_event_id(d);
3590     int cpu;
3591 
3592     cpu = its_select_cpu(d, cpu_online_mask);
3593     if (cpu < 0 || cpu >= nr_cpu_ids)
3594         return -EINVAL;
3595 
3596     its_inc_lpi_count(d, cpu);
3597     its_dev->event_map.col_map[event] = cpu;
3598     irq_data_update_effective_affinity(d, cpumask_of(cpu));
3599 
3600     /* Map the GIC IRQ and event to the device */
3601     its_send_mapti(its_dev, d->hwirq, event);
3602     return 0;
3603 }
3604 
3605 static void its_irq_domain_deactivate(struct irq_domain *domain,
3606                       struct irq_data *d)
3607 {
3608     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3609     u32 event = its_get_event_id(d);
3610 
3611     its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3612     /* Stop the delivery of interrupts */
3613     its_send_discard(its_dev, event);
3614 }
3615 
3616 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3617                 unsigned int nr_irqs)
3618 {
3619     struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3620     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3621     struct its_node *its = its_dev->its;
3622     int i;
3623 
3624     bitmap_release_region(its_dev->event_map.lpi_map,
3625                   its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3626                   get_count_order(nr_irqs));
3627 
3628     for (i = 0; i < nr_irqs; i++) {
3629         struct irq_data *data = irq_domain_get_irq_data(domain,
3630                                 virq + i);
3631         /* Nuke the entry in the domain */
3632         irq_domain_reset_irq_data(data);
3633     }
3634 
3635     mutex_lock(&its->dev_alloc_lock);
3636 
3637     /*
3638      * If all interrupts have been freed, start mopping the
3639      * floor. This is conditioned on the device not being shared.
3640      */
3641     if (!its_dev->shared &&
3642         bitmap_empty(its_dev->event_map.lpi_map,
3643              its_dev->event_map.nr_lpis)) {
3644         its_lpi_free(its_dev->event_map.lpi_map,
3645                  its_dev->event_map.lpi_base,
3646                  its_dev->event_map.nr_lpis);
3647 
3648         /* Unmap device/itt */
3649         its_send_mapd(its_dev, 0);
3650         its_free_device(its_dev);
3651     }
3652 
3653     mutex_unlock(&its->dev_alloc_lock);
3654 
3655     irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3656 }
3657 
3658 static const struct irq_domain_ops its_domain_ops = {
3659     .alloc          = its_irq_domain_alloc,
3660     .free           = its_irq_domain_free,
3661     .activate       = its_irq_domain_activate,
3662     .deactivate     = its_irq_domain_deactivate,
3663 };
3664 
3665 /*
3666  * This is insane.
3667  *
3668  * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3669  * likely), the only way to perform an invalidate is to use a fake
3670  * device to issue an INV command, implying that the LPI has first
3671  * been mapped to some event on that device. Since this is not exactly
3672  * cheap, we try to keep that mapping around as long as possible, and
3673  * only issue an UNMAP if we're short on available slots.
3674  *
3675  * Broken by design(tm).
3676  *
3677  * GICv4.1, on the other hand, mandates that we're able to invalidate
3678  * by writing to a MMIO register. It doesn't implement the whole of
3679  * DirectLPI, but that's good enough. And most of the time, we don't
3680  * even have to invalidate anything, as the redistributor can be told
3681  * whether to generate a doorbell or not (we thus leave it enabled,
3682  * always).
3683  */
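/*
 * As a rough sketch (using the helpers below), invalidating the
 * doorbell LPI of a vPE on such a GICv4.0 boils down to:
 *
 *	MAPTI   proxy_dev, db_lpi, event   (its_vpe_db_proxy_map_locked)
 *	INV     proxy_dev, event           (its_vpe_send_cmd / its_send_inv)
 *	...mapping kept around for later reuse...
 *	DISCARD proxy_dev, event           (only when the slot is recycled)
 */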
3684 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3685 {
3686     /* GICv4.1 doesn't use a proxy, so nothing to do here */
3687     if (gic_rdists->has_rvpeid)
3688         return;
3689 
3690     /* Already unmapped? */
3691     if (vpe->vpe_proxy_event == -1)
3692         return;
3693 
3694     its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3695     vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3696 
3697     /*
3698      * We don't track empty slots at all, so let's move the
3699      * next_victim pointer if we can quickly reuse that slot
3700      * instead of nuking an existing entry. Not clear that this is
3701      * always a win though, and this might just generate a ripple
3702      * effect... Let's just hope VPEs don't migrate too often.
3703      */
3704     if (vpe_proxy.vpes[vpe_proxy.next_victim])
3705         vpe_proxy.next_victim = vpe->vpe_proxy_event;
3706 
3707     vpe->vpe_proxy_event = -1;
3708 }
3709 
3710 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3711 {
3712     /* GICv4.1 doesn't use a proxy, so nothing to do here */
3713     if (gic_rdists->has_rvpeid)
3714         return;
3715 
3716     if (!gic_rdists->has_direct_lpi) {
3717         unsigned long flags;
3718 
3719         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3720         its_vpe_db_proxy_unmap_locked(vpe);
3721         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3722     }
3723 }
3724 
3725 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3726 {
3727     /* GICv4.1 doesn't use a proxy, so nothing to do here */
3728     if (gic_rdists->has_rvpeid)
3729         return;
3730 
3731     /* Already mapped? */
3732     if (vpe->vpe_proxy_event != -1)
3733         return;
3734 
3735     /* This slot was already allocated. Kick the other VPE out. */
3736     if (vpe_proxy.vpes[vpe_proxy.next_victim])
3737         its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3738 
3739     /* Map the new VPE instead */
3740     vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3741     vpe->vpe_proxy_event = vpe_proxy.next_victim;
3742     vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3743 
3744     vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3745     its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3746 }
3747 
3748 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3749 {
3750     unsigned long flags;
3751     struct its_collection *target_col;
3752 
3753     /* GICv4.1 doesn't use a proxy, so nothing to do here */
3754     if (gic_rdists->has_rvpeid)
3755         return;
3756 
3757     if (gic_rdists->has_direct_lpi) {
3758         void __iomem *rdbase;
3759 
3760         rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3761         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3762         wait_for_syncr(rdbase);
3763 
3764         return;
3765     }
3766 
3767     raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3768 
3769     its_vpe_db_proxy_map_locked(vpe);
3770 
3771     target_col = &vpe_proxy.dev->its->collections[to];
3772     its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3773     vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3774 
3775     raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3776 }
3777 
3778 static int its_vpe_set_affinity(struct irq_data *d,
3779                 const struct cpumask *mask_val,
3780                 bool force)
3781 {
3782     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3783     int from, cpu = cpumask_first(mask_val);
3784     unsigned long flags;
3785 
3786     /*
3787      * Changing affinity is mega expensive, so let's be as lazy as
3788      * we can and only do it if we really have to. Also, if mapped
3789      * into the proxy device, we need to move the doorbell
3790      * interrupt to its new location.
3791      *
3792      * Another thing is that changing the affinity of a vPE affects
3793      * *other interrupts* such as all the vLPIs that are routed to
3794      * this vPE. This means that the irq_desc lock is not enough to
3795      * protect us, and that we must ensure nobody samples vpe->col_idx
3796      * during the update, hence the lock below which must also be
3797      * taken on any vLPI handling path that evaluates vpe->col_idx.
3798      */
3799     from = vpe_to_cpuid_lock(vpe, &flags);
3800     if (from == cpu)
3801         goto out;
3802 
3803     vpe->col_idx = cpu;
3804 
3805     /*
3806      * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3807      * is sharing its VPE table with the current one.
3808      */
3809     if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3810         cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3811         goto out;
3812 
3813     its_send_vmovp(vpe);
3814     its_vpe_db_proxy_move(vpe, from, cpu);
3815 
3816 out:
3817     irq_data_update_effective_affinity(d, cpumask_of(cpu));
3818     vpe_to_cpuid_unlock(vpe, flags);
3819 
3820     return IRQ_SET_MASK_OK_DONE;
3821 }
3822 
3823 static void its_wait_vpt_parse_complete(void)
3824 {
3825     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3826     u64 val;
3827 
3828     if (!gic_rdists->has_vpend_valid_dirty)
3829         return;
3830 
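    /*
     * Poll GICR_VPENDBASER every 1us for the Dirty bit to clear,
     * giving up (and warning) after 500us.
     */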
3831     WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3832                                val,
3833                                !(val & GICR_VPENDBASER_Dirty),
3834                                1, 500));
3835 }
3836 
3837 static void its_vpe_schedule(struct its_vpe *vpe)
3838 {
3839     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3840     u64 val;
3841 
3842     /* Schedule the VPE */
3843     val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3844         GENMASK_ULL(51, 12);
3845     val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3846     val |= GICR_VPROPBASER_RaWb;
3847     val |= GICR_VPROPBASER_InnerShareable;
3848     gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3849 
3850     val  = virt_to_phys(page_address(vpe->vpt_page)) &
3851         GENMASK_ULL(51, 16);
3852     val |= GICR_VPENDBASER_RaWaWb;
3853     val |= GICR_VPENDBASER_InnerShareable;
3854     /*
3855      * There is no good way of finding out if the pending table is
3856      * empty as we can race against the doorbell interrupt very
3857      * easily. So in the end, vpe->pending_last is only an
3858      * indication that the vcpu has something pending, not one
3859      * that the pending table is empty. A good implementation
3860      * would be able to read its coarse map pretty quickly anyway,
3861      * making this a tolerable issue.
3862      */
3863     val |= GICR_VPENDBASER_PendingLast;
3864     val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3865     val |= GICR_VPENDBASER_Valid;
3866     gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3867 }
3868 
3869 static void its_vpe_deschedule(struct its_vpe *vpe)
3870 {
3871     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3872     u64 val;
3873 
3874     val = its_clear_vpend_valid(vlpi_base, 0, 0);
3875 
3876     vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3877     vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3878 }
3879 
3880 static void its_vpe_invall(struct its_vpe *vpe)
3881 {
3882     struct its_node *its;
3883 
3884     list_for_each_entry(its, &its_nodes, entry) {
3885         if (!is_v4(its))
3886             continue;
3887 
3888         if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3889             continue;
3890 
3891         /*
3892          * Sending a VINVALL to a single ITS is enough, as all
3893          * we need is to reach the redistributors.
3894          */
3895         its_send_vinvall(its, vpe);
3896         return;
3897     }
3898 }
3899 
3900 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3901 {
3902     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3903     struct its_cmd_info *info = vcpu_info;
3904 
3905     switch (info->cmd_type) {
3906     case SCHEDULE_VPE:
3907         its_vpe_schedule(vpe);
3908         return 0;
3909 
3910     case DESCHEDULE_VPE:
3911         its_vpe_deschedule(vpe);
3912         return 0;
3913 
3914     case COMMIT_VPE:
3915         its_wait_vpt_parse_complete();
3916         return 0;
3917 
3918     case INVALL_VPE:
3919         its_vpe_invall(vpe);
3920         return 0;
3921 
3922     default:
3923         return -EINVAL;
3924     }
3925 }
3926 
3927 static void its_vpe_send_cmd(struct its_vpe *vpe,
3928                  void (*cmd)(struct its_device *, u32))
3929 {
3930     unsigned long flags;
3931 
3932     raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3933 
3934     its_vpe_db_proxy_map_locked(vpe);
3935     cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3936 
3937     raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3938 }
3939 
3940 static void its_vpe_send_inv(struct irq_data *d)
3941 {
3942     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3943 
3944     if (gic_rdists->has_direct_lpi) {
3945         void __iomem *rdbase;
3946 
3947         /* Target the redistributor this VPE is currently known on */
3948         raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3949         rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3950         gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3951         wait_for_syncr(rdbase);
3952         raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3953     } else {
3954         its_vpe_send_cmd(vpe, its_send_inv);
3955     }
3956 }
3957 
3958 static void its_vpe_mask_irq(struct irq_data *d)
3959 {
3960     /*
3961      * We need to mask the LPI, which is described by the parent
3962      * irq_data. Instead of calling into the parent (which won't
3963      * exactly do the right thing), let's simply use the
3964      * parent_data pointer. Yes, I'm naughty.
3965      */
3966     lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3967     its_vpe_send_inv(d);
3968 }
3969 
3970 static void its_vpe_unmask_irq(struct irq_data *d)
3971 {
3972     /* Same hack as above... */
3973     lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3974     its_vpe_send_inv(d);
3975 }
3976 
3977 static int its_vpe_set_irqchip_state(struct irq_data *d,
3978                      enum irqchip_irq_state which,
3979                      bool state)
3980 {
3981     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3982 
3983     if (which != IRQCHIP_STATE_PENDING)
3984         return -EINVAL;
3985 
3986     if (gic_rdists->has_direct_lpi) {
3987         void __iomem *rdbase;
3988 
3989         rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3990         if (state) {
3991             gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3992         } else {
3993             gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3994             wait_for_syncr(rdbase);
3995         }
3996     } else {
3997         if (state)
3998             its_vpe_send_cmd(vpe, its_send_int);
3999         else
4000             its_vpe_send_cmd(vpe, its_send_clear);
4001     }
4002 
4003     return 0;
4004 }
4005 
4006 static int its_vpe_retrigger(struct irq_data *d)
4007 {
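    /*
     * irq_retrigger callbacks return non-zero when the retrigger was
     * performed, while the helper below returns 0 on success, hence
     * the negation.
     */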
4008     return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4009 }
4010 
4011 static struct irq_chip its_vpe_irq_chip = {
4012     .name           = "GICv4-vpe",
4013     .irq_mask       = its_vpe_mask_irq,
4014     .irq_unmask     = its_vpe_unmask_irq,
4015     .irq_eoi        = irq_chip_eoi_parent,
4016     .irq_set_affinity   = its_vpe_set_affinity,
4017     .irq_retrigger      = its_vpe_retrigger,
4018     .irq_set_irqchip_state  = its_vpe_set_irqchip_state,
4019     .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
4020 };
4021 
4022 static struct its_node *find_4_1_its(void)
4023 {
4024     static struct its_node *its = NULL;
4025 
4026     if (!its) {
4027         list_for_each_entry(its, &its_nodes, entry) {
4028             if (is_v4_1(its))
4029                 return its;
4030         }
4031 
4032         /* Oops? */
4033         its = NULL;
4034     }
4035 
4036     return its;
4037 }
4038 
4039 static void its_vpe_4_1_send_inv(struct irq_data *d)
4040 {
4041     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4042     struct its_node *its;
4043 
4044     /*
4045      * GICv4.1 wants doorbells to be invalidated using the
4046      * INVDB command in order to be broadcast to all RDs. Send
4047      * it to the first valid ITS, and let the HW do its magic.
4048      */
4049     its = find_4_1_its();
4050     if (its)
4051         its_send_invdb(its, vpe);
4052 }
4053 
4054 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4055 {
4056     lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4057     its_vpe_4_1_send_inv(d);
4058 }
4059 
4060 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4061 {
4062     lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4063     its_vpe_4_1_send_inv(d);
4064 }
4065 
4066 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4067                  struct its_cmd_info *info)
4068 {
4069     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4070     u64 val = 0;
4071 
4072     /* Schedule the VPE */
4073     val |= GICR_VPENDBASER_Valid;
4074     val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4075     val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4076     val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4077 
4078     gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4079 }
4080 
4081 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4082                    struct its_cmd_info *info)
4083 {
4084     void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4085     u64 val;
4086 
4087     if (info->req_db) {
4088         unsigned long flags;
4089 
4090         /*
4091          * vPE is going to block: make the vPE non-resident with
4092          * PendingLast clear and DB set. The GIC guarantees that if
4093          * we read-back PendingLast clear, then a doorbell will be
4094          * delivered when an interrupt comes.
4095          *
4096          * Note the locking: it protects pending_last against a
4097          * concurrent update from the doorbell interrupt handler,
4098          * which can run in parallel with this code.
4099          */
4100         raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4101         val = its_clear_vpend_valid(vlpi_base,
4102                         GICR_VPENDBASER_PendingLast,
4103                         GICR_VPENDBASER_4_1_DB);
4104         vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4105         raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4106     } else {
4107         /*
4108          * We're not blocking, so just make the vPE non-resident
4109          * with PendingLast set, indicating that we'll be back.
4110          */
4111         val = its_clear_vpend_valid(vlpi_base,
4112                         0,
4113                         GICR_VPENDBASER_PendingLast);
4114         vpe->pending_last = true;
4115     }
4116 }
4117 
4118 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4119 {
4120     void __iomem *rdbase;
4121     unsigned long flags;
4122     u64 val;
4123     int cpu;
4124 
4125     val  = GICR_INVALLR_V;
4126     val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4127 
4128     /* Target the redistributor this vPE is currently known on */
4129     cpu = vpe_to_cpuid_lock(vpe, &flags);
4130     raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4131     rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4132     gic_write_lpir(val, rdbase + GICR_INVALLR);
4133 
4134     wait_for_syncr(rdbase);
4135     raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4136     vpe_to_cpuid_unlock(vpe, flags);
4137 }
4138 
4139 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4140 {
4141     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4142     struct its_cmd_info *info = vcpu_info;
4143 
4144     switch (info->cmd_type) {
4145     case SCHEDULE_VPE:
4146         its_vpe_4_1_schedule(vpe, info);
4147         return 0;
4148 
4149     case DESCHEDULE_VPE:
4150         its_vpe_4_1_deschedule(vpe, info);
4151         return 0;
4152 
4153     case COMMIT_VPE:
4154         its_wait_vpt_parse_complete();
4155         return 0;
4156 
4157     case INVALL_VPE:
4158         its_vpe_4_1_invall(vpe);
4159         return 0;
4160 
4161     default:
4162         return -EINVAL;
4163     }
4164 }
4165 
4166 static struct irq_chip its_vpe_4_1_irq_chip = {
4167     .name           = "GICv4.1-vpe",
4168     .irq_mask       = its_vpe_4_1_mask_irq,
4169     .irq_unmask     = its_vpe_4_1_unmask_irq,
4170     .irq_eoi        = irq_chip_eoi_parent,
4171     .irq_set_affinity   = its_vpe_set_affinity,
4172     .irq_set_vcpu_affinity  = its_vpe_4_1_set_vcpu_affinity,
4173 };
4174 
4175 static void its_configure_sgi(struct irq_data *d, bool clear)
4176 {
4177     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4178     struct its_cmd_desc desc;
4179 
4180     desc.its_vsgi_cmd.vpe = vpe;
4181     desc.its_vsgi_cmd.sgi = d->hwirq;
4182     desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4183     desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4184     desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4185     desc.its_vsgi_cmd.clear = clear;
4186 
4187     /*
4188      * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4189      * destination VPE is mapped there. Since we map them eagerly at
4190      * activation time, we're pretty sure the first GICv4.1 ITS will do.
4191      */
4192     its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4193 }
4194 
4195 static void its_sgi_mask_irq(struct irq_data *d)
4196 {
4197     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4198 
4199     vpe->sgi_config[d->hwirq].enabled = false;
4200     its_configure_sgi(d, false);
4201 }
4202 
4203 static void its_sgi_unmask_irq(struct irq_data *d)
4204 {
4205     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4206 
4207     vpe->sgi_config[d->hwirq].enabled = true;
4208     its_configure_sgi(d, false);
4209 }
4210 
4211 static int its_sgi_set_affinity(struct irq_data *d,
4212                 const struct cpumask *mask_val,
4213                 bool force)
4214 {
4215     /*
4216      * There is no notion of affinity for virtual SGIs, at least
4217      * not on the host (since they can only be targeting a vPE).
4218      * Tell the kernel we've done whatever it asked for.
4219      */
4220     irq_data_update_effective_affinity(d, mask_val);
4221     return IRQ_SET_MASK_OK;
4222 }
4223 
4224 static int its_sgi_set_irqchip_state(struct irq_data *d,
4225                      enum irqchip_irq_state which,
4226                      bool state)
4227 {
4228     if (which != IRQCHIP_STATE_PENDING)
4229         return -EINVAL;
4230 
4231     if (state) {
4232         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4233         struct its_node *its = find_4_1_its();
4234         u64 val;
4235 
4236         val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4237         val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
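        /*
         * GITS_SGIR is defined as an offset from the ITS base, but
         * sgir_base already maps the frame at base + 128K (see
         * its_probe_one()), hence the SZ_128K adjustment.
         */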
4238         writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4239     } else {
4240         its_configure_sgi(d, true);
4241     }
4242 
4243     return 0;
4244 }
4245 
4246 static int its_sgi_get_irqchip_state(struct irq_data *d,
4247                      enum irqchip_irq_state which, bool *val)
4248 {
4249     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4250     void __iomem *base;
4251     unsigned long flags;
4252     u32 count = 1000000;    /* 1s! */
4253     u32 status;
4254     int cpu;
4255 
4256     if (which != IRQCHIP_STATE_PENDING)
4257         return -EINVAL;
4258 
4259     /*
4260      * Locking galore! We can race against two different events:
4261      *
4262      * - Concurrent vPE affinity change: we must make sure it cannot
4263      *   happen, or we'll talk to the wrong redistributor. This is
4264      *   identical to what happens with vLPIs.
4265      *
4266      * - Concurrent VSGIPENDR access: As it involves accessing two
4267      *   MMIO registers, this must be made atomic one way or another.
4268      */
4269     cpu = vpe_to_cpuid_lock(vpe, &flags);
4270     raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4271     base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4272     writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4273     do {
4274         status = readl_relaxed(base + GICR_VSGIPENDR);
4275         if (!(status & GICR_VSGIPENDR_BUSY))
4276             goto out;
4277 
4278         count--;
4279         if (!count) {
4280             pr_err_ratelimited("Unable to get SGI status\n");
4281             goto out;
4282         }
4283         cpu_relax();
4284         udelay(1);
4285     } while (count);
4286 
4287 out:
4288     raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4289     vpe_to_cpuid_unlock(vpe, flags);
4290 
4291     if (!count)
4292         return -ENXIO;
4293 
4294     *val = !!(status & (1 << d->hwirq));
4295 
4296     return 0;
4297 }
4298 
4299 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4300 {
4301     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4302     struct its_cmd_info *info = vcpu_info;
4303 
4304     switch (info->cmd_type) {
4305     case PROP_UPDATE_VSGI:
4306         vpe->sgi_config[d->hwirq].priority = info->priority;
4307         vpe->sgi_config[d->hwirq].group = info->group;
4308         its_configure_sgi(d, false);
4309         return 0;
4310 
4311     default:
4312         return -EINVAL;
4313     }
4314 }
4315 
4316 static struct irq_chip its_sgi_irq_chip = {
4317     .name           = "GICv4.1-sgi",
4318     .irq_mask       = its_sgi_mask_irq,
4319     .irq_unmask     = its_sgi_unmask_irq,
4320     .irq_set_affinity   = its_sgi_set_affinity,
4321     .irq_set_irqchip_state  = its_sgi_set_irqchip_state,
4322     .irq_get_irqchip_state  = its_sgi_get_irqchip_state,
4323     .irq_set_vcpu_affinity  = its_sgi_set_vcpu_affinity,
4324 };
4325 
4326 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4327                     unsigned int virq, unsigned int nr_irqs,
4328                     void *args)
4329 {
4330     struct its_vpe *vpe = args;
4331     int i;
4332 
4333     /* Yes, we do want 16 SGIs */
4334     WARN_ON(nr_irqs != 16);
4335 
4336     for (i = 0; i < 16; i++) {
4337         vpe->sgi_config[i].priority = 0;
4338         vpe->sgi_config[i].enabled = false;
4339         vpe->sgi_config[i].group = false;
4340 
4341         irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4342                           &its_sgi_irq_chip, vpe);
4343         irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4344     }
4345 
4346     return 0;
4347 }
4348 
4349 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4350                     unsigned int virq,
4351                     unsigned int nr_irqs)
4352 {
4353     /* Nothing to do */
4354 }
4355 
4356 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4357                        struct irq_data *d, bool reserve)
4358 {
4359     /* Write out the initial SGI configuration */
4360     its_configure_sgi(d, false);
4361     return 0;
4362 }
4363 
4364 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4365                       struct irq_data *d)
4366 {
4367     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4368 
4369     /*
4370      * The VSGI command is awkward:
4371      *
4372      * - To change the configuration, CLEAR must be set to false,
4373      *   leaving the pending bit unchanged.
4374      * - To clear the pending bit, CLEAR must be set to true, leaving
4375      *   the configuration unchanged.
4376      *
4377      * You just can't do both at once, hence the two commands below.
4378      */
4379     vpe->sgi_config[d->hwirq].enabled = false;
4380     its_configure_sgi(d, false);
4381     its_configure_sgi(d, true);
4382 }
4383 
4384 static const struct irq_domain_ops its_sgi_domain_ops = {
4385     .alloc      = its_sgi_irq_domain_alloc,
4386     .free       = its_sgi_irq_domain_free,
4387     .activate   = its_sgi_irq_domain_activate,
4388     .deactivate = its_sgi_irq_domain_deactivate,
4389 };
4390 
4391 static int its_vpe_id_alloc(void)
4392 {
4393     return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4394 }
4395 
4396 static void its_vpe_id_free(u16 id)
4397 {
4398     ida_simple_remove(&its_vpeid_ida, id);
4399 }
4400 
4401 static int its_vpe_init(struct its_vpe *vpe)
4402 {
4403     struct page *vpt_page;
4404     int vpe_id;
4405 
4406     /* Allocate vpe_id */
4407     vpe_id = its_vpe_id_alloc();
4408     if (vpe_id < 0)
4409         return vpe_id;
4410 
4411     /* Allocate VPT */
4412     vpt_page = its_allocate_pending_table(GFP_KERNEL);
4413     if (!vpt_page) {
4414         its_vpe_id_free(vpe_id);
4415         return -ENOMEM;
4416     }
4417 
4418     if (!its_alloc_vpe_table(vpe_id)) {
4419         its_vpe_id_free(vpe_id);
4420         its_free_pending_table(vpt_page);
4421         return -ENOMEM;
4422     }
4423 
4424     raw_spin_lock_init(&vpe->vpe_lock);
4425     vpe->vpe_id = vpe_id;
4426     vpe->vpt_page = vpt_page;
4427     if (gic_rdists->has_rvpeid)
4428         atomic_set(&vpe->vmapp_count, 0);
4429     else
4430         vpe->vpe_proxy_event = -1;
4431 
4432     return 0;
4433 }
4434 
4435 static void its_vpe_teardown(struct its_vpe *vpe)
4436 {
4437     its_vpe_db_proxy_unmap(vpe);
4438     its_vpe_id_free(vpe->vpe_id);
4439     its_free_pending_table(vpe->vpt_page);
4440 }
4441 
4442 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4443                     unsigned int virq,
4444                     unsigned int nr_irqs)
4445 {
4446     struct its_vm *vm = domain->host_data;
4447     int i;
4448 
4449     irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4450 
4451     for (i = 0; i < nr_irqs; i++) {
4452         struct irq_data *data = irq_domain_get_irq_data(domain,
4453                                 virq + i);
4454         struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4455 
4456         BUG_ON(vm != vpe->its_vm);
4457 
4458         clear_bit(data->hwirq, vm->db_bitmap);
4459         its_vpe_teardown(vpe);
4460         irq_domain_reset_irq_data(data);
4461     }
4462 
4463     if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4464         its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4465         its_free_prop_table(vm->vprop_page);
4466     }
4467 }
4468 
4469 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4470                     unsigned int nr_irqs, void *args)
4471 {
4472     struct irq_chip *irqchip = &its_vpe_irq_chip;
4473     struct its_vm *vm = args;
4474     unsigned long *bitmap;
4475     struct page *vprop_page;
4476     int base, nr_ids, i, err = 0;
4477 
4478     BUG_ON(!vm);
4479 
4480     bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4481     if (!bitmap)
4482         return -ENOMEM;
4483 
4484     if (nr_ids < nr_irqs) {
4485         its_lpi_free(bitmap, base, nr_ids);
4486         return -ENOMEM;
4487     }
4488 
4489     vprop_page = its_allocate_prop_table(GFP_KERNEL);
4490     if (!vprop_page) {
4491         its_lpi_free(bitmap, base, nr_ids);
4492         return -ENOMEM;
4493     }
4494 
4495     vm->db_bitmap = bitmap;
4496     vm->db_lpi_base = base;
4497     vm->nr_db_lpis = nr_ids;
4498     vm->vprop_page = vprop_page;
4499 
4500     if (gic_rdists->has_rvpeid)
4501         irqchip = &its_vpe_4_1_irq_chip;
4502 
4503     for (i = 0; i < nr_irqs; i++) {
4504         vm->vpes[i]->vpe_db_lpi = base + i;
4505         err = its_vpe_init(vm->vpes[i]);
4506         if (err)
4507             break;
4508         err = its_irq_gic_domain_alloc(domain, virq + i,
4509                            vm->vpes[i]->vpe_db_lpi);
4510         if (err)
4511             break;
4512         irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4513                           irqchip, vm->vpes[i]);
4514         set_bit(i, bitmap);
4515     }
4516 
4517     if (err) {
4518         if (i > 0)
4519             its_vpe_irq_domain_free(domain, virq, i);
4520 
4521         its_lpi_free(bitmap, base, nr_ids);
4522         its_free_prop_table(vprop_page);
4523     }
4524 
4525     return err;
4526 }
4527 
4528 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4529                        struct irq_data *d, bool reserve)
4530 {
4531     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4532     struct its_node *its;
4533 
4534     /*
4535      * If we use the list map, we issue VMAPP on demand... unless
4536      * we're on a GICv4.1, in which case we eagerly map the VPE on
4537      * all ITSs so that VSGIs can work.
4538      */
4539     if (!gic_requires_eager_mapping())
4540         return 0;
4541 
4542     /* Map the VPE to the first possible CPU */
4543     vpe->col_idx = cpumask_first(cpu_online_mask);
4544 
4545     list_for_each_entry(its, &its_nodes, entry) {
4546         if (!is_v4(its))
4547             continue;
4548 
4549         its_send_vmapp(its, vpe, true);
4550         its_send_vinvall(its, vpe);
4551     }
4552 
4553     irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4554 
4555     return 0;
4556 }
4557 
4558 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4559                       struct irq_data *d)
4560 {
4561     struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4562     struct its_node *its;
4563 
4564     /*
4565      * If we use the list map on GICv4.0, we unmap the VPE once no
4566      * VLPIs are associated with the VM.
4567      */
4568     if (!gic_requires_eager_mapping())
4569         return;
4570 
4571     list_for_each_entry(its, &its_nodes, entry) {
4572         if (!is_v4(its))
4573             continue;
4574 
4575         its_send_vmapp(its, vpe, false);
4576     }
4577 
4578     /*
4579      * There may be a direct read to the VPT after unmapping the
4580      * vPE. To guarantee the validity of that read, we make the VPT
4581      * memory coherent with the CPU caches here.
4582      */
4583     if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4584         gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4585                     LPI_PENDBASE_SZ);
4586 }
4587 
4588 static const struct irq_domain_ops its_vpe_domain_ops = {
4589     .alloc          = its_vpe_irq_domain_alloc,
4590     .free           = its_vpe_irq_domain_free,
4591     .activate       = its_vpe_irq_domain_activate,
4592     .deactivate     = its_vpe_irq_domain_deactivate,
4593 };
4594 
4595 static int its_force_quiescent(void __iomem *base)
4596 {
4597     u32 count = 1000000;    /* 1s */
4598     u32 val;
4599 
4600     val = readl_relaxed(base + GITS_CTLR);
4601     /*
4602      * GIC architecture specification requires the ITS to be both
4603      * disabled and quiescent for writes to GITS_BASER<n> or
4604      * GITS_CBASER to not have UNPREDICTABLE results.
4605      */
4606     if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4607         return 0;
4608 
4609     /* Disable the generation of all interrupts to this ITS */
4610     val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4611     writel_relaxed(val, base + GITS_CTLR);
4612 
4613     /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4614     while (1) {
4615         val = readl_relaxed(base + GITS_CTLR);
4616         if (val & GITS_CTLR_QUIESCENT)
4617             return 0;
4618 
4619         count--;
4620         if (!count)
4621             return -EBUSY;
4622 
4623         cpu_relax();
4624         udelay(1);
4625     }
4626 }
4627 
4628 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4629 {
4630     struct its_node *its = data;
4631 
4632     /* erratum 22375: only alloc 8MB table size (20 bits) */
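    /*
     * (With a typical 8-byte device table entry, 2^20 device IDs work
     * out to the 8MB mentioned above.)
     */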
4633     its->typer &= ~GITS_TYPER_DEVBITS;
4634     its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4635     its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4636 
4637     return true;
4638 }
4639 
4640 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4641 {
4642     struct its_node *its = data;
4643 
4644     its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4645 
4646     return true;
4647 }
4648 
4649 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4650 {
4651     struct its_node *its = data;
4652 
4653     /* On QDF2400, the size of the ITE is 16 bytes */
4654     its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4655     its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4656 
4657     return true;
4658 }
4659 
4660 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4661 {
4662     struct its_node *its = its_dev->its;
4663 
4664     /*
4665      * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4666      * which maps 32-bit writes targeted at a separate window of
4667      * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4668      * with device ID taken from bits [device_id_bits + 1:2] of
4669      * the window offset.
4670      */
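    /*
     * For illustration: a device with ID 0x1234 gets a doorbell at
     * pre_its_base + (0x1234 << 2) = pre_its_base + 0x48d0, and the
     * pre-ITS recovers 0x1234 from bits [device_id_bits + 1:2] of
     * that offset before forwarding the write to GITS_TRANSLATER.
     */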
4671     return its->pre_its_base + (its_dev->device_id << 2);
4672 }
4673 
4674 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4675 {
4676     struct its_node *its = data;
4677     u32 pre_its_window[2];
4678     u32 ids;
4679 
4680     if (!fwnode_property_read_u32_array(its->fwnode_handle,
4681                        "socionext,synquacer-pre-its",
4682                        pre_its_window,
4683                        ARRAY_SIZE(pre_its_window))) {
4684 
4685         its->pre_its_base = pre_its_window[0];
4686         its->get_msi_base = its_irq_get_msi_base_pre_its;
4687 
4688         ids = ilog2(pre_its_window[1]) - 2;
4689         if (device_ids(its) > ids) {
4690             its->typer &= ~GITS_TYPER_DEVBITS;
4691             its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4692         }
4693 
4694         /* the pre-ITS breaks isolation, so disable MSI remapping */
4695         its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4696         return true;
4697     }
4698     return false;
4699 }
4700 
4701 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4702 {
4703     struct its_node *its = data;
4704 
4705     /*
4706      * Hip07 insists on using the wrong address for the VLPI
4707      * page. Trick it into doing the right thing...
4708      */
4709     its->vlpi_redist_offset = SZ_128K;
4710     return true;
4711 }
4712 
4713 static const struct gic_quirk its_quirks[] = {
4714 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4715     {
4716         .desc   = "ITS: Cavium errata 22375, 24313",
4717         .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
4718         .mask   = 0xffff0fff,
4719         .init   = its_enable_quirk_cavium_22375,
4720     },
4721 #endif
4722 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4723     {
4724         .desc   = "ITS: Cavium erratum 23144",
4725         .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
4726         .mask   = 0xffff0fff,
4727         .init   = its_enable_quirk_cavium_23144,
4728     },
4729 #endif
4730 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4731     {
4732         .desc   = "ITS: QDF2400 erratum 0065",
4733         .iidr   = 0x00001070, /* QDF2400 ITS rev 1.x */
4734         .mask   = 0xffffffff,
4735         .init   = its_enable_quirk_qdf2400_e0065,
4736     },
4737 #endif
4738 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4739     {
4740         /*
4741          * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4742          * implementation, but with a 'pre-ITS' added that requires
4743          * special handling in software.
4744          */
4745         .desc   = "ITS: Socionext Synquacer pre-ITS",
4746         .iidr   = 0x0001143b,
4747         .mask   = 0xffffffff,
4748         .init   = its_enable_quirk_socionext_synquacer,
4749     },
4750 #endif
4751 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4752     {
4753         .desc   = "ITS: Hip07 erratum 161600802",
4754         .iidr   = 0x00000004,
4755         .mask   = 0xffffffff,
4756         .init   = its_enable_quirk_hip07_161600802,
4757     },
4758 #endif
4759     {
4760     }
4761 };
4762 
4763 static void its_enable_quirks(struct its_node *its)
4764 {
4765     u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4766 
4767     gic_enable_quirks(iidr, its_quirks, its);
4768 }
4769 
4770 static int its_save_disable(void)
4771 {
4772     struct its_node *its;
4773     int err = 0;
4774 
4775     raw_spin_lock(&its_lock);
4776     list_for_each_entry(its, &its_nodes, entry) {
4777         void __iomem *base;
4778 
4779         base = its->base;
4780         its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4781         err = its_force_quiescent(base);
4782         if (err) {
4783             pr_err("ITS@%pa: failed to quiesce: %d\n",
4784                    &its->phys_base, err);
4785             writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4786             goto err;
4787         }
4788 
4789         its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4790     }
4791 
4792 err:
4793     if (err) {
4794         list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4795             void __iomem *base;
4796 
4797             base = its->base;
4798             writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4799         }
4800     }
4801     raw_spin_unlock(&its_lock);
4802 
4803     return err;
4804 }
4805 
4806 static void its_restore_enable(void)
4807 {
4808     struct its_node *its;
4809     int ret;
4810 
4811     raw_spin_lock(&its_lock);
4812     list_for_each_entry(its, &its_nodes, entry) {
4813         void __iomem *base;
4814         int i;
4815 
4816         base = its->base;
4817 
4818         /*
4819          * Make sure that the ITS is disabled. If it fails to quiesce,
4820          * don't restore it since writing to CBASER or BASER<n>
4821          * registers is undefined according to the GIC v3 ITS
4822          * Specification.
4823          *
4824          * Firmware resuming with the ITS enabled is terminally broken.
4825          */
4826         WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
4827         ret = its_force_quiescent(base);
4828         if (ret) {
4829             pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4830                    &its->phys_base, ret);
4831             continue;
4832         }
4833 
4834         gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4835 
4836         /*
4837          * Writing CBASER resets CREADR to 0, so make CWRITER and
4838          * cmd_write line up with it.
4839          */
4840         its->cmd_write = its->cmd_base;
4841         gits_write_cwriter(0, base + GITS_CWRITER);
4842 
4843         /* Restore GITS_BASER from the value cache. */
4844         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4845             struct its_baser *baser = &its->tables[i];
4846 
4847             if (!(baser->val & GITS_BASER_VALID))
4848                 continue;
4849 
4850             its_write_baser(its, baser, baser->val);
4851         }
4852         writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4853 
4854         /*
4855          * Reinit the collection if it's stored in the ITS. This is
4856          * indicated by the col_id being less than the HCC field
4857          * (CID < HCC, as specified in the GICv3 documentation).
4858          */
4859         if (its->collections[smp_processor_id()].col_id <
4860             GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4861             its_cpu_init_collection(its);
4862     }
4863     raw_spin_unlock(&its_lock);
4864 }
4865 
4866 static struct syscore_ops its_syscore_ops = {
4867     .suspend = its_save_disable,
4868     .resume = its_restore_enable,
4869 };
4870 
4871 static void __init __iomem *its_map_one(struct resource *res, int *err)
4872 {
4873     void __iomem *its_base;
4874     u32 val;
4875 
4876     its_base = ioremap(res->start, SZ_64K);
4877     if (!its_base) {
4878         pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4879         *err = -ENOMEM;
4880         return NULL;
4881     }
4882 
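    /*
     * The ArchRev field of GITS_PIDR2 tells us what we are looking at:
     * 0x30 means a GICv3 ITS, 0x40 a GICv4 one. Anything else and
     * there is no ITS behind this window.
     */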
4883     val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4884     if (val != 0x30 && val != 0x40) {
4885         pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4886         *err = -ENODEV;
4887         goto out_unmap;
4888     }
4889 
4890     *err = its_force_quiescent(its_base);
4891     if (*err) {
4892         pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4893         goto out_unmap;
4894     }
4895 
4896     return its_base;
4897 
4898 out_unmap:
4899     iounmap(its_base);
4900     return NULL;
4901 }
4902 
4903 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4904 {
4905     struct irq_domain *inner_domain;
4906     struct msi_domain_info *info;
4907 
4908     info = kzalloc(sizeof(*info), GFP_KERNEL);
4909     if (!info)
4910         return -ENOMEM;
4911 
4912     inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4913     if (!inner_domain) {
4914         kfree(info);
4915         return -ENOMEM;
4916     }
4917 
4918     inner_domain->parent = its_parent;
4919     irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4920     inner_domain->flags |= its->msi_domain_flags;
4921     info->ops = &its_msi_domain_ops;
4922     info->data = its;
4923     inner_domain->host_data = info;
4924 
4925     return 0;
4926 }
4927 
4928 static int its_init_vpe_domain(void)
4929 {
4930     struct its_node *its;
4931     u32 devid;
4932     int entries;
4933 
4934     if (gic_rdists->has_direct_lpi) {
4935         pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4936         return 0;
4937     }
4938 
4939     /* Any ITS will do, even if not v4 */
4940     its = list_first_entry(&its_nodes, struct its_node, entry);
4941 
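    /*
     * Size the proxy with one slot per possible CPU, rounded up to a
     * power of two since its_create_device() warns on (and rounds up)
     * non-power-of-two event counts. Slots are recycled round-robin
     * via next_victim if vPEs end up outnumbering them.
     */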
4942     entries = roundup_pow_of_two(nr_cpu_ids);
4943     vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4944                  GFP_KERNEL);
4945     if (!vpe_proxy.vpes)
4946         return -ENOMEM;
4947 
4948     /* Use the last possible DevID */
4949     devid = GENMASK(device_ids(its) - 1, 0);
4950     vpe_proxy.dev = its_create_device(its, devid, entries, false);
4951     if (!vpe_proxy.dev) {
4952         kfree(vpe_proxy.vpes);
4953         pr_err("ITS: Can't allocate GICv4 proxy device\n");
4954         return -ENOMEM;
4955     }
4956 
4957     BUG_ON(entries > vpe_proxy.dev->nr_ites);
4958 
4959     raw_spin_lock_init(&vpe_proxy.lock);
4960     vpe_proxy.next_victim = 0;
4961     pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4962         devid, vpe_proxy.dev->nr_ites);
4963 
4964     return 0;
4965 }
4966 
4967 static int __init its_compute_its_list_map(struct resource *res,
4968                        void __iomem *its_base)
4969 {
4970     int its_number;
4971     u32 ctlr;
4972 
4973     /*
4974      * This is assumed to be done early enough that we're
4975      * guaranteed to be single-threaded, hence no
4976      * locking. Should this change, we should address
4977      * this.
4978      */
4979     its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
4980     if (its_number >= GICv4_ITS_LIST_MAX) {
4981         pr_err("ITS@%pa: No ITSList entry available!\n",
4982                &res->start);
4983         return -EINVAL;
4984     }
4985 
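    /*
     * Program our choice into GITS_CTLR.ITS_Number and read it back:
     * if the field doesn't stick, the implementation presumably has a
     * fixed ITS number, so adopt whatever it reports instead.
     */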
4986     ctlr = readl_relaxed(its_base + GITS_CTLR);
4987     ctlr &= ~GITS_CTLR_ITS_NUMBER;
4988     ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
4989     writel_relaxed(ctlr, its_base + GITS_CTLR);
4990     ctlr = readl_relaxed(its_base + GITS_CTLR);
4991     if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
4992         its_number = ctlr & GITS_CTLR_ITS_NUMBER;
4993         its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
4994     }
4995 
4996     if (test_and_set_bit(its_number, &its_list_map)) {
4997         pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
4998                &res->start, its_number);
4999         return -EINVAL;
5000     }
5001 
5002     return its_number;
5003 }
5004 
5005 static int __init its_probe_one(struct resource *res,
5006                 struct fwnode_handle *handle, int numa_node)
5007 {
5008     struct its_node *its;
5009     void __iomem *its_base;
5010     u64 baser, tmp, typer;
5011     struct page *page;
5012     u32 ctlr;
5013     int err;
5014 
5015     its_base = its_map_one(res, &err);
5016     if (!its_base)
5017         return err;
5018 
5019     pr_info("ITS %pR\n", res);
5020 
5021     its = kzalloc(sizeof(*its), GFP_KERNEL);
5022     if (!its) {
5023         err = -ENOMEM;
5024         goto out_unmap;
5025     }
5026 
5027     raw_spin_lock_init(&its->lock);
5028     mutex_init(&its->dev_alloc_lock);
5029     INIT_LIST_HEAD(&its->entry);
5030     INIT_LIST_HEAD(&its->its_device_list);
5031     typer = gic_read_typer(its_base + GITS_TYPER);
5032     its->typer = typer;
5033     its->base = its_base;
5034     its->phys_base = res->start;
5035     if (is_v4(its)) {
5036         if (!(typer & GITS_TYPER_VMOVP)) {
5037             err = its_compute_its_list_map(res, its_base);
5038             if (err < 0)
5039                 goto out_free_its;
5040 
5041             its->list_nr = err;
5042 
5043             pr_info("ITS@%pa: Using ITS number %d\n",
5044                 &res->start, err);
5045         } else {
5046             pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
5047         }
5048 
5049         if (is_v4_1(its)) {
5050             u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
5051 
5052             its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
5053             if (!its->sgir_base) {
5054                 err = -ENOMEM;
5055                 goto out_free_its;
5056             }
5057 
5058             its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
5059 
5060             pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5061                 &res->start, its->mpidr, svpet);
5062         }
5063     }
5064 
5065     its->numa_node = numa_node;
5066 
5067     page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5068                 get_order(ITS_CMD_QUEUE_SZ));
5069     if (!page) {
5070         err = -ENOMEM;
5071         goto out_unmap_sgir;
5072     }
5073     its->cmd_base = (void *)page_address(page);
5074     its->cmd_write = its->cmd_base;
5075     its->fwnode_handle = handle;
5076     its->get_msi_base = its_irq_get_msi_base;
5077     its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
5078 
5079     its_enable_quirks(its);
5080 
5081     err = its_alloc_tables(its);
5082     if (err)
5083         goto out_free_cmd;
5084 
5085     err = its_alloc_collections(its);
5086     if (err)
5087         goto out_free_tables;
5088 
5089     baser = (virt_to_phys(its->cmd_base)    |
5090          GITS_CBASER_RaWaWb     |
5091          GITS_CBASER_InnerShareable |
5092          (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5093          GITS_CBASER_VALID);
5094 
5095     gits_write_cbaser(baser, its->base + GITS_CBASER);
5096     tmp = gits_read_cbaser(its->base + GITS_CBASER);
5097 
5098     if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5099         if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5100             /*
5101              * The HW reports non-shareable, so we must
5102              * remove the cacheability attributes as
5103              * well.
5104              */
5105             baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5106                    GITS_CBASER_CACHEABILITY_MASK);
5107             baser |= GITS_CBASER_nC;
5108             gits_write_cbaser(baser, its->base + GITS_CBASER);
5109         }
5110         pr_info("ITS: using cache flushing for cmd queue\n");
5111         its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5112     }
5113 
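         /* Reset the write pointer and enable the ITS (ImDe as well on GICv4) */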
5114     gits_write_cwriter(0, its->base + GITS_CWRITER);
5115     ctlr = readl_relaxed(its->base + GITS_CTLR);
5116     ctlr |= GITS_CTLR_ENABLE;
5117     if (is_v4(its))
5118         ctlr |= GITS_CTLR_ImDe;
5119     writel_relaxed(ctlr, its->base + GITS_CTLR);
5120 
5121     err = its_init_domain(handle, its);
5122     if (err)
5123         goto out_free_tables;
5124 
5125     raw_spin_lock(&its_lock);
5126     list_add(&its->entry, &its_nodes);
5127     raw_spin_unlock(&its_lock);
5128 
5129     return 0;
5130 
5131 out_free_tables:
5132     its_free_tables(its);
5133 out_free_cmd:
5134     free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5135 out_unmap_sgir:
5136     if (its->sgir_base)
5137         iounmap(its->sgir_base);
5138 out_free_its:
5139     kfree(its);
5140 out_unmap:
5141     iounmap(its_base);
5142     pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
5143     return err;
5144 }
5145 
5146 static bool gic_rdists_supports_plpis(void)
5147 {
5148     return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5149 }
5150 
5151 static int redist_disable_lpis(void)
5152 {
5153     void __iomem *rbase = gic_data_rdist_rd_base();
5154     u64 timeout = USEC_PER_SEC;
5155     u64 val;
5156 
5157     if (!gic_rdists_supports_plpis()) {
5158         pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5159         return -ENXIO;
5160     }
5161 
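         /* Nothing to do if LPIs are not currently enabled */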
5162     val = readl_relaxed(rbase + GICR_CTLR);
5163     if (!(val & GICR_CTLR_ENABLE_LPIS))
5164         return 0;
5165 
5166     /*
5167      * If coming via a CPU hotplug event, we don't need to disable
5168      * LPIs before trying to re-enable them. They are already
5169      * configured and all is well in the world.
5170      *
5171      * If running with preallocated tables, there is nothing to do.
5172      */
5173     if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5174         (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5175         return 0;
5176 
5177     /*
5178      * From that point on, we only try to do some damage control.
5179      */
5180     pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5181         smp_processor_id());
5182     add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5183 
5184     /* Disable LPIs */
5185     val &= ~GICR_CTLR_ENABLE_LPIS;
5186     writel_relaxed(val, rbase + GICR_CTLR);
5187 
5188     /* Make sure any change to GICR_CTLR is observable by the GIC */
5189     dsb(sy);
5190 
5191     /*
5192      * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5193      * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5194      * Error out if we time out waiting for RWP to clear.
5195      */
5196     while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5197         if (!timeout) {
5198             pr_err("CPU%d: Timeout while disabling LPIs\n",
5199                    smp_processor_id());
5200             return -ETIMEDOUT;
5201         }
5202         udelay(1);
5203         timeout--;
5204     }
5205 
5206     /*
5207      * After it has been written to 1, it is IMPLEMENTATION
5208      * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5209      * cleared to 0. Error out if clearing the bit failed.
5210      */
5211     if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5212         pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5213         return -EBUSY;
5214     }
5215 
5216     return 0;
5217 }
5218 
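     /*
      * Per-CPU bring-up: make sure this redistributor's LPIs are in a sane
      * state, then initialise its LPI tables and map its collection on
      * every ITS.
      */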
5219 int its_cpu_init(void)
5220 {
5221     if (!list_empty(&its_nodes)) {
5222         int ret;
5223 
5224         ret = redist_disable_lpis();
5225         if (ret)
5226             return ret;
5227 
5228         its_cpu_init_lpis();
5229         its_cpu_init_collections();
5230     }
5231 
5232     return 0;
5233 }
5234 
5235 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5236 {
5237     cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5238     gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5239 }
5240 
5241 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5242             rdist_memreserve_cpuhp_cleanup_workfn);
5243 
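     /*
      * CPU hotplug callback, run once per CPU: if the pending table was
      * inherited pre-programmed, free the page we allocated speculatively;
      * otherwise reserve it (via gic_reserve_range()) so that a later
      * kexec'd kernel won't reuse the memory. Once every possible CPU has
      * booted at least once, the cleanup work removes the cpuhp state.
      */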
5244 static int its_cpu_memreserve_lpi(unsigned int cpu)
5245 {
5246     struct page *pend_page;
5247     int ret = 0;
5248 
5249     /* This gets to run exactly once per CPU */
5250     if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5251         return 0;
5252 
5253     pend_page = gic_data_rdist()->pend_page;
5254     if (WARN_ON(!pend_page)) {
5255         ret = -ENOMEM;
5256         goto out;
5257     }
5258     /*
5259      * If the pending table was pre-programmed, free the memory we
5260      * preemptively allocated. Otherwise, reserve that memory for
5261      * later kexecs.
5262      */
5263     if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5264         its_free_pending_table(pend_page);
5265         gic_data_rdist()->pend_page = NULL;
5266     } else {
5267         phys_addr_t paddr = page_to_phys(pend_page);
5268         WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5269     }
5270 
5271 out:
5272     /* Last CPU being brought up gets to issue the cleanup */
5273     if (!IS_ENABLED(CONFIG_SMP) ||
5274         cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5275         schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5276 
5277     gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5278     return ret;
5279 }
5280 
5281 /* Mark all the BASER registers as invalid before they get reprogrammed */
5282 static int __init its_reset_one(struct resource *res)
5283 {
5284     void __iomem *its_base;
5285     int err, i;
5286 
5287     its_base = its_map_one(res, &err);
5288     if (!its_base)
5289         return err;
5290 
5291     for (i = 0; i < GITS_BASER_NR_REGS; i++)
5292         gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5293 
5294     iounmap(its_base);
5295     return 0;
5296 }
5297 
5298 static const struct of_device_id its_device_id[] = {
5299     {   .compatible = "arm,gic-v3-its", },
5300     {},
5301 };
5302 
5303 static int __init its_of_probe(struct device_node *node)
5304 {
5305     struct device_node *np;
5306     struct resource res;
5307 
5308     /*
5309      * Make sure *all* the ITS are reset before we probe any, as
5310      * they may be sharing memory. If any of the ITS fails to
5311      * reset, don't even try to go any further, as this could
5312      * result in something even worse.
5313      */
5314     for (np = of_find_matching_node(node, its_device_id); np;
5315          np = of_find_matching_node(np, its_device_id)) {
5316         int err;
5317 
5318         if (!of_device_is_available(np) ||
5319             !of_property_read_bool(np, "msi-controller") ||
5320             of_address_to_resource(np, 0, &res))
5321             continue;
5322 
5323         err = its_reset_one(&res);
5324         if (err)
5325             return err;
5326     }
5327 
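         /*
          * Second pass: probe every available ITS that is an MSI controller
          * and has a usable register window.
          */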
5328     for (np = of_find_matching_node(node, its_device_id); np;
5329          np = of_find_matching_node(np, its_device_id)) {
5330         if (!of_device_is_available(np))
5331             continue;
5332         if (!of_property_read_bool(np, "msi-controller")) {
5333             pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5334                 np);
5335             continue;
5336         }
5337 
5338         if (of_address_to_resource(np, 0, &res)) {
5339             pr_warn("%pOF: no regs?\n", np);
5340             continue;
5341         }
5342 
5343         its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
5344     }
5345     return 0;
5346 }
5347 
5348 #ifdef CONFIG_ACPI
5349 
5350 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5351 
5352 #ifdef CONFIG_ACPI_NUMA
5353 struct its_srat_map {
5354     /* numa node id */
5355     u32 numa_node;
5356     /* GIC ITS ID */
5357     u32 its_id;
5358 };
5359 
5360 static struct its_srat_map *its_srat_maps __initdata;
5361 static int its_in_srat __initdata;
5362 
5363 static int __init acpi_get_its_numa_node(u32 its_id)
5364 {
5365     int i;
5366 
5367     for (i = 0; i < its_in_srat; i++) {
5368         if (its_id == its_srat_maps[i].its_id)
5369             return its_srat_maps[i].numa_node;
5370     }
5371     return NUMA_NO_NODE;
5372 }
5373 
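     /*
      * Dummy matcher: acpi_table_parse_entries() only uses it to count the
      * GIC ITS affinity entries, so that its_srat_maps[] can be sized
      * before the real parse below.
      */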
5374 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5375                       const unsigned long end)
5376 {
5377     return 0;
5378 }
5379 
5380 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5381              const unsigned long end)
5382 {
5383     int node;
5384     struct acpi_srat_gic_its_affinity *its_affinity;
5385 
5386     its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5387     if (!its_affinity)
5388         return -EINVAL;
5389 
5390     if (its_affinity->header.length < sizeof(*its_affinity)) {
5391         pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5392             its_affinity->header.length);
5393         return -EINVAL;
5394     }
5395 
5396     /*
5397      * Note that in theory a new proximity node could be created by this
5398      * entry as it is an SRAT resource allocation structure.
5399      * We do not currently support doing so.
5400      */
5401     node = pxm_to_node(its_affinity->proximity_domain);
5402 
5403     if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5404         pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5405         return 0;
5406     }
5407 
5408     its_srat_maps[its_in_srat].numa_node = node;
5409     its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5410     its_in_srat++;
5411     pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5412         its_affinity->proximity_domain, its_affinity->its_id, node);
5413 
5414     return 0;
5415 }
5416 
5417 static void __init acpi_table_parse_srat_its(void)
5418 {
5419     int count;
5420 
5421     count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5422             sizeof(struct acpi_table_srat),
5423             ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5424             gic_acpi_match_srat_its, 0);
5425     if (count <= 0)
5426         return;
5427 
5428     its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5429                       GFP_KERNEL);
5430     if (!its_srat_maps)
5431         return;
5432 
5433     acpi_table_parse_entries(ACPI_SIG_SRAT,
5434             sizeof(struct acpi_table_srat),
5435             ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5436             gic_acpi_parse_srat_its, 0);
5437 }
5438 
5439 /* free the its_srat_maps after ITS probing */
5440 static void __init acpi_its_srat_maps_free(void)
5441 {
5442     kfree(its_srat_maps);
5443 }
5444 #else
5445 static void __init acpi_table_parse_srat_its(void)  { }
5446 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5447 static void __init acpi_its_srat_maps_free(void) { }
5448 #endif
5449 
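     /*
      * For each MADT GIC ITS (generic translator) entry: build a 128kB MMIO
      * resource, allocate an irqdomain fwnode token, register that token
      * with IORT so devices can be mapped to this ITS, then probe it.
      */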
5450 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5451                       const unsigned long end)
5452 {
5453     struct acpi_madt_generic_translator *its_entry;
5454     struct fwnode_handle *dom_handle;
5455     struct resource res;
5456     int err;
5457 
5458     its_entry = (struct acpi_madt_generic_translator *)header;
5459     memset(&res, 0, sizeof(res));
5460     res.start = its_entry->base_address;
5461     res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5462     res.flags = IORESOURCE_MEM;
5463 
5464     dom_handle = irq_domain_alloc_fwnode(&res.start);
5465     if (!dom_handle) {
5466         pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5467                &res.start);
5468         return -ENOMEM;
5469     }
5470 
5471     err = iort_register_domain_token(its_entry->translation_id, res.start,
5472                      dom_handle);
5473     if (err) {
5474         pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5475                &res.start, its_entry->translation_id);
5476         goto dom_err;
5477     }
5478 
5479     err = its_probe_one(&res, dom_handle,
5480             acpi_get_its_numa_node(its_entry->translation_id));
5481     if (!err)
5482         return 0;
5483 
5484     iort_deregister_domain_token(its_entry->translation_id);
5485 dom_err:
5486     irq_domain_free_fwnode(dom_handle);
5487     return err;
5488 }
5489 
5490 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5491                  const unsigned long end)
5492 {
5493     struct acpi_madt_generic_translator *its_entry;
5494     struct resource res;
5495 
5496     its_entry = (struct acpi_madt_generic_translator *)header;
5497     res = (struct resource) {
5498         .start  = its_entry->base_address,
5499         .end    = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5500         .flags  = IORESOURCE_MEM,
5501     };
5502 
5503     return its_reset_one(&res);
5504 }
5505 
5506 static void __init its_acpi_probe(void)
5507 {
5508     acpi_table_parse_srat_its();
5509     /*
5510      * Make sure *all* the ITS are reset before we probe any, as
5511      * they may be sharing memory. If any of the ITS fails to
5512      * reset, don't even try to go any further, as this could
5513      * result in something even worse.
5514      */
5515     if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5516                   its_acpi_reset, 0) > 0)
5517         acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5518                       gic_acpi_parse_madt_its, 0);
5519     acpi_its_srat_maps_free();
5520 }
5521 #else
5522 static void __init its_acpi_probe(void) { }
5523 #endif
5524 
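     /*
      * Register a dynamic cpuhp state so that each CPU performs the
      * pending-table reservation above as it comes online. Only relevant
      * when booted via EFI, since the reservation relies on the EFI
      * memreserve machinery.
      */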
5525 int __init its_lpi_memreserve_init(void)
5526 {
5527     int state;
5528 
5529     if (!efi_enabled(EFI_CONFIG_TABLES))
5530         return 0;
5531 
5532     if (list_empty(&its_nodes))
5533         return 0;
5534 
5535     gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5536     state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5537                   "irqchip/arm/gicv3/memreserve:online",
5538                   its_cpu_memreserve_lpi,
5539                   NULL);
5540     if (state < 0)
5541         return state;
5542 
5543     gic_rdists->cpuhp_memreserve_state = state;
5544 
5545     return 0;
5546 }
5547 
5548 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5549             struct irq_domain *parent_domain)
5550 {
5551     struct device_node *of_node;
5552     struct its_node *its;
5553     bool has_v4 = false;
5554     bool has_v4_1 = false;
5555     int err;
5556 
5557     gic_rdists = rdists;
5558 
5559     its_parent = parent_domain;
5560     of_node = to_of_node(handle);
5561     if (of_node)
5562         its_of_probe(of_node);
5563     else
5564         its_acpi_probe();
5565 
5566     if (list_empty(&its_nodes)) {
5567         pr_warn("ITS: No ITS available, not enabling LPIs\n");
5568         return -ENXIO;
5569     }
5570 
5571     err = allocate_lpi_tables();
5572     if (err)
5573         return err;
5574 
5575     list_for_each_entry(its, &its_nodes, entry) {
5576         has_v4 |= is_v4(its);
5577         has_v4_1 |= is_v4_1(its);
5578     }
5579 
5580     /* Don't bother with inconsistent systems */
5581     if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5582         rdists->has_rvpeid = false;
5583 
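         /*
          * Only wire up GICv4 direct injection if both the ITSs and the
          * redistributors support vLPIs; GICv4.1 additionally provides the
          * vSGI irqdomain ops. If any part of the setup fails, fall back to
          * plain GICv3 operation.
          */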
5584     if (has_v4 && rdists->has_vlpis) {
5585         const struct irq_domain_ops *sgi_ops;
5586 
5587         if (has_v4_1)
5588             sgi_ops = &its_sgi_domain_ops;
5589         else
5590             sgi_ops = NULL;
5591 
5592         if (its_init_vpe_domain() ||
5593             its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5594             rdists->has_vlpis = false;
5595             pr_err("ITS: Disabling GICv4 support\n");
5596         }
5597     }
5598 
5599     register_syscore_ops(&its_syscore_ops);
5600 
5601     return 0;
5602 }