// SPDX-License-Identifier: GPL-2.0
/*
 * HiSilicon HiP04 INTC
 *
 * Copyright (C) 2002-2014 ARM Limited.
 * Copyright (c) 2013-2014 HiSilicon Ltd.
 * Copyright (c) 2013-2014 Linaro Ltd.
 *
 * Interrupt architecture for the HIP04 INTC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-CPU as well.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"

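/*
 * The HIP04 INTC is a GIC variant with 16 CPU interfaces instead of 8.
 * Each GIC_DIST_TARGET word therefore holds two 16-bit CPU masks rather
 * than four 8-bit ones, halving the usable interrupt ID space from the
 * standard GIC's 1020 down to 510.
 */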
#define HIP04_MAX_IRQS 510

struct hip04_irq_data {
	void __iomem *dist_base;
	void __iomem *cpu_base;
	struct irq_domain *domain;
	unsigned int nr_irqs;
};

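/*
 * Serializes the read-modify-write sequences on distributor registers
 * (enable set/clear, target routing, SOFTINT) that several CPUs may
 * perform concurrently.
 */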
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_HIP04_CPU_IF 16
static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;

static struct hip04_irq_data hip04_data __read_mostly;

static inline void __iomem *hip04_dist_base(struct irq_data *d)
{
	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
	return hip04_data->dist_base;
}

static inline void __iomem *hip04_cpu_base(struct irq_data *d)
{
	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
	return hip04_data->cpu_base;
}

static inline unsigned int hip04_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void hip04_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void hip04_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void hip04_eoi_irq(struct irq_data *d)
{
	writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
}

static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = hip04_dist_base(d);
	unsigned int irq = hip04_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && irq < 32) {
		/* Misconfigured PPIs are usually harmless */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
		ret = 0;
	}

	raw_spin_unlock(&irq_controller_lock);

	return ret;
}

#ifdef CONFIG_SMP
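/*
 * Interrupt targets are 16-bit CPU interface masks, two per 32-bit
 * GIC_DIST_TARGET word: IRQ N lives at byte offset (N * 2) & ~3, in
 * the half-word selected by (N % 2) * 16.
 */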
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	void __iomem *reg;
	unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
	mask = 0xffff << shift;
	bit = hip04_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

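/*
 * Low-level interrupt entry: acknowledge interrupts from the CPU
 * interface and dispatch them through the irq domain until the
 * controller returns an ID above HIP04_MAX_IRQS (nothing pending).
 */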
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	void __iomem *cpu_base = hip04_data.cpu_base;

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (irqnr <= HIP04_MAX_IRQS)
			generic_handle_domain_irq(hip04_data.domain, irqnr);
	} while (irqnr > HIP04_MAX_IRQS);
}

static struct irq_chip hip04_irq_chip = {
	.name			= "HIP04 INTC",
	.irq_mask		= hip04_mask_irq,
	.irq_unmask		= hip04_unmask_irq,
	.irq_eoi		= hip04_eoi_irq,
	.irq_set_type		= hip04_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= hip04_irq_set_affinity,
	.ipi_send_mask		= hip04_ipi_send_mask,
#endif
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

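/*
 * Read the banked GIC_DIST_TARGET registers for the per-CPU interrupts
 * (0-31); the hardware reports the CPU interface mask of the CPU
 * performing the read there.
 */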
static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
{
	void __iomem *base = intc->dist_base;
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 2) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
		mask |= mask >> 16;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
{
	unsigned int i;
	u32 cpumask;
	unsigned int nr_irqs = intc->nr_irqs;
	void __iomem *base = intc->dist_base;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = hip04_get_cpumask(intc);
	cpumask |= cpumask << 16;
	for (i = 32; i < nr_irqs; i += 2)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));

	gic_dist_config(base, nr_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}

static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
{
	void __iomem *dist_base = intc->dist_base;
	void __iomem *base = intc->cpu_base;
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_HIP04_CPU_IF);
	cpu_mask = hip04_get_cpumask(intc);
	hip04_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		if (i != cpu)
			hip04_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}

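/*
 * hwirqs 0-31 are the per-CPU SGIs/PPIs and get percpu handling;
 * everything above is a shared SPI handled as fasteoi, routed to a
 * single CPU target at a time.
 */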
static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}

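/*
 * A single DT cell below 16 denotes an SGI; otherwise the standard
 * three-cell GIC binding (PPI/SPI flag, number, trigger flags) applies.
 */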
static int hip04_irq_domain_xlate(struct irq_domain *d,
				  struct device_node *controller,
				  const u32 *intspec, unsigned int intsize,
				  unsigned long *out_hwirq,
				  unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;
	if (intsize == 1 && intspec[0] < 16) {
		*out_hwirq = intspec[0];
		*out_type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int hip04_irq_starting_cpu(unsigned int cpu)
{
	hip04_irq_cpu_init(&hip04_data);
	return 0;
}

static const struct irq_domain_ops hip04_irq_domain_ops = {
	.map	= hip04_irq_domain_map,
	.xlate	= hip04_irq_domain_xlate,
};

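/*
 * Probe path, invoked via IRQCHIP_DECLARE(): map the distributor and
 * CPU interface registers, size the interrupt space, set up a legacy
 * irq domain plus the SGI-based IPI range, and register a hotplug
 * callback so each CPU initializes its own interface as it comes up.
 */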
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
	int nr_irqs, irq_base, i;

	if (WARN_ON(!node))
		return -ENODEV;

	hip04_data.dist_base = of_iomap(node, 0);
	WARN(!hip04_data.dist_base, "unable to map hip04 intc dist registers\n");

	hip04_data.cpu_base = of_iomap(node, 1);
	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		hip04_cpu_map[i] = 0xffff;

	/*
	 * Find out how many interrupts are supported.
	 * The HIP04 INTC only supports up to 510 interrupt sources.
	 */
	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
	nr_irqs = (nr_irqs + 1) * 32;
	if (nr_irqs > HIP04_MAX_IRQS)
		nr_irqs = HIP04_MAX_IRQS;
	hip04_data.nr_irqs = nr_irqs;

	irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
	if (irq_base < 0) {
		pr_err("failed to allocate IRQ numbers\n");
		return -EINVAL;
	}

	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
						  0,
						  &hip04_irq_domain_ops,
						  &hip04_data);
	if (WARN_ON(!hip04_data.domain))
		return -EINVAL;

#ifdef CONFIG_SMP
	set_smp_ipi_range(irq_base, 16);
#endif
	set_handle_irq(hip04_handle_irq);

	hip04_irq_dist_init(&hip04_data);
	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
			  hip04_irq_starting_cpu, NULL);
	return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);