// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM6345 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Copyright 2015 Simon Arlott
 *
 * This is based on the BCM7038 (which supports SMP) but with a single
 * enable register instead of separate mask/set/clear registers.
 *
 * The BCM3380 has a similar mask/status register layout, but each pair
 * of words is at separate locations (and SMP is not supported).
 *
 * ENABLE/STATUS words are packed next to each other for each CPU:
 *
 * BCM6368:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W0_STATUS        IRQs 32-63
 *   0x1000_002c: CPU0_W1_STATUS        IRQs 0-31
 *   0x1000_0030: CPU1_W0_ENABLE
 *   0x1000_0034: CPU1_W1_ENABLE
 *   0x1000_0038: CPU1_W0_STATUS        IRQs 32-63
 *   0x1000_003c: CPU1_W1_STATUS        IRQs 0-31
 *
 * BCM63168:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W2_ENABLE
 *   0x1000_002c: CPU0_W3_ENABLE
 *   0x1000_0030: CPU0_W0_STATUS    IRQs 96-127
 *   0x1000_0034: CPU0_W1_STATUS    IRQs 64-95
 *   0x1000_0038: CPU0_W2_STATUS    IRQs 32-63
 *   0x1000_003c: CPU0_W3_STATUS    IRQs 0-31
 *   0x1000_0040: CPU1_W0_ENABLE
 *   0x1000_0044: CPU1_W1_ENABLE
 *   0x1000_0048: CPU1_W2_ENABLE
 *   0x1000_004c: CPU1_W3_ENABLE
 *   0x1000_0050: CPU1_W0_STATUS    IRQs 96-127
 *   0x1000_0054: CPU1_W1_STATUS    IRQs 64-95
 *   0x1000_0058: CPU1_W2_STATUS    IRQs 32-63
 *   0x1000_005c: CPU1_W3_STATUS    IRQs 0-31
 *
 * IRQs are numbered in CPU native endian order
 * (which is big-endian in these examples)
 */
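
/*
 * Illustrative device tree node (a sketch based on how this driver parses
 * its node, not copied from a particular board): one "reg" range and one
 * parent interrupt per possible CPU, with one-cell interrupt specifiers.
 * The register sizes match the 2-word BCM6368 layout above; the parent
 * label and interrupt numbers are placeholders.
 *
 *   periph_intc: interrupt-controller@10000020 {
 *           compatible = "brcm,bcm6345-l1-intc";
 *           reg = <0x10000020 0x10>,
 *                 <0x10000030 0x10>;
 *
 *           interrupt-controller;
 *           #interrupt-cells = <1>;
 *
 *           interrupt-parent = <&cpu_intc>;
 *           interrupts = <2>, <3>;
 *   };
 */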

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define IRQS_PER_WORD       32
#define REG_BYTES_PER_IRQ_WORD  (sizeof(u32) * 2)

struct bcm6345_l1_cpu;
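
/*
 * Per-controller state: the lock serializes updates to the per-CPU enable
 * caches, n_words is the number of 32-bit IRQ words per CPU, domain is the
 * linear IRQ domain, cpumask is the set of CPUs whose register block was
 * successfully set up, and cpus holds each CPU's state.
 */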
struct bcm6345_l1_chip {
    raw_spinlock_t      lock;
    unsigned int        n_words;
    struct irq_domain   *domain;
    struct cpumask      cpumask;
    struct bcm6345_l1_cpu   *cpus[NR_CPUS];
};
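
/*
 * Per-CPU state: the ioremapped register block, the parent (chained)
 * interrupt for this CPU, and a cached copy of each enable word so it can
 * be updated read-modify-write without reading back the hardware register.
 */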
struct bcm6345_l1_cpu {
    void __iomem        *map_base;
    unsigned int        parent_irq;
    u32         enable_cache[];
};
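
/*
 * Byte offset of the enable word for software word index @word (hwirq / 32)
 * within a CPU's register block. The enable words come first, followed by
 * the status words. On big-endian systems the hardware keeps the
 * highest-numbered IRQ word at the lowest address, so the word order is
 * reversed: with two words, word 0 (IRQs 0-31) is at offset 0x4 and word 1
 * (IRQs 32-63) at offset 0x0.
 */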
static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
                       unsigned int word)
{
#ifdef __BIG_ENDIAN
    return (1 * intc->n_words - word - 1) * sizeof(u32);
#else
    return (0 * intc->n_words + word) * sizeof(u32);
#endif
}
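
/*
 * Byte offset of the matching status word; the status block starts after
 * the n_words enable words, with the same endian-dependent word ordering.
 */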
static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
                      unsigned int word)
{
#ifdef __BIG_ENDIAN
    return (2 * intc->n_words - word - 1) * sizeof(u32);
#else
    return (1 * intc->n_words + word) * sizeof(u32);
#endif
}
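
/*
 * CPU whose enable register is used for this IRQ: the first CPU that both
 * belongs to this controller and is in the IRQ's affinity mask.
 */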
static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
                    struct irq_data *d)
{
    return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
}
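
/*
 * Chained handler for a CPU's parent interrupt: for each word, read the
 * status register of the local CPU's block, mask it with the enable
 * register, and dispatch every pending bit through the IRQ domain.
 */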
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
    struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
    struct bcm6345_l1_cpu *cpu;
    struct irq_chip *chip = irq_desc_get_chip(desc);
    unsigned int idx;

#ifdef CONFIG_SMP
    cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
    cpu = intc->cpus[0];
#endif

    chained_irq_enter(chip, desc);

    for (idx = 0; idx < intc->n_words; idx++) {
        int base = idx * IRQS_PER_WORD;
        unsigned long pending;
        irq_hw_number_t hwirq;

        pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
        pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));

        for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
            if (generic_handle_domain_irq(intc->domain, base + hwirq))
                spurious_interrupt();
        }
    }

    chained_irq_exit(chip, desc);
}
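
/*
 * Unlocked helpers used by the irq_chip callbacks and by
 * bcm6345_l1_set_affinity(); callers must hold intc->lock. They update the
 * cached enable word for the IRQ's target CPU and write it back to hardware.
 */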
static inline void __bcm6345_l1_unmask(struct irq_data *d)
{
    struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
    u32 word = d->hwirq / IRQS_PER_WORD;
    u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
    unsigned int cpu_idx = cpu_for_irq(intc, d);

    intc->cpus[cpu_idx]->enable_cache[word] |= mask;
    __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
        intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static inline void __bcm6345_l1_mask(struct irq_data *d)
{
    struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
    u32 word = d->hwirq / IRQS_PER_WORD;
    u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
    unsigned int cpu_idx = cpu_for_irq(intc, d);

    intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
    __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
        intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static void bcm6345_l1_unmask(struct irq_data *d)
{
    struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
    unsigned long flags;

    raw_spin_lock_irqsave(&intc->lock, flags);
    __bcm6345_l1_unmask(d);
    raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm6345_l1_mask(struct irq_data *d)
{
    struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
    unsigned long flags;

    raw_spin_lock_irqsave(&intc->lock, flags);
    __bcm6345_l1_mask(d);
    raw_spin_unlock_irqrestore(&intc->lock, flags);
}
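
/*
 * Move an IRQ to another CPU's enable register. Only CPUs handled by this
 * controller (and currently online) are valid targets. If the IRQ is
 * currently enabled, it is masked on the old CPU and unmasked again on the
 * new one after the affinity has been updated.
 */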
static int bcm6345_l1_set_affinity(struct irq_data *d,
                   const struct cpumask *dest,
                   bool force)
{
    struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
    u32 word = d->hwirq / IRQS_PER_WORD;
    u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
    unsigned int old_cpu = cpu_for_irq(intc, d);
    unsigned int new_cpu;
    struct cpumask valid;
    unsigned long flags;
    bool enabled;

    if (!cpumask_and(&valid, &intc->cpumask, dest))
        return -EINVAL;

    new_cpu = cpumask_any_and(&valid, cpu_online_mask);
    if (new_cpu >= nr_cpu_ids)
        return -EINVAL;

    dest = cpumask_of(new_cpu);

    raw_spin_lock_irqsave(&intc->lock, flags);
    if (old_cpu != new_cpu) {
        enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
        if (enabled)
            __bcm6345_l1_mask(d);
        irq_data_update_affinity(d, dest);
        if (enabled)
            __bcm6345_l1_unmask(d);
    } else {
        irq_data_update_affinity(d, dest);
    }
    raw_spin_unlock_irqrestore(&intc->lock, flags);

    irq_data_update_effective_affinity(d, cpumask_of(new_cpu));

    return IRQ_SET_MASK_OK_NOCOPY;
}
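
/*
 * Set up one CPU's register block: map the idx'th "reg" range (whose size
 * determines the number of 32-bit IRQ words; every CPU must expose the same
 * number), mask all interrupts for that CPU, and install the chained handler
 * on the idx'th parent interrupt of the node.
 */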
static int __init bcm6345_l1_init_one(struct device_node *dn,
                      unsigned int idx,
                      struct bcm6345_l1_chip *intc)
{
    struct resource res;
    resource_size_t sz;
    struct bcm6345_l1_cpu *cpu;
    unsigned int i, n_words;

    if (of_address_to_resource(dn, idx, &res))
        return -EINVAL;
    sz = resource_size(&res);
    n_words = sz / REG_BYTES_PER_IRQ_WORD;

    if (!intc->n_words)
        intc->n_words = n_words;
    else if (intc->n_words != n_words)
        return -EINVAL;

    cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
                    GFP_KERNEL);
    if (!cpu)
        return -ENOMEM;

    cpu->map_base = ioremap(res.start, sz);
    if (!cpu->map_base)
        return -ENOMEM;

    for (i = 0; i < n_words; i++) {
        cpu->enable_cache[i] = 0;
        __raw_writel(0, cpu->map_base + reg_enable(intc, i));
    }

    cpu->parent_irq = irq_of_parse_and_map(dn, idx);
    if (!cpu->parent_irq) {
        pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
        return -EINVAL;
    }
    irq_set_chained_handler_and_data(cpu->parent_irq,
                        bcm6345_l1_irq_handle, intc);

    return 0;
}

static struct irq_chip bcm6345_l1_irq_chip = {
    .name           = "bcm6345-l1",
    .irq_mask       = bcm6345_l1_mask,
    .irq_unmask     = bcm6345_l1_unmask,
    .irq_set_affinity   = bcm6345_l1_set_affinity,
};
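
/*
 * Domain .map callback: attach the irq_chip and the handle_percpu_irq flow
 * handler to the new virq and mark it as a single-target interrupt.
 */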
static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
              irq_hw_number_t hw_irq)
{
    irq_set_chip_and_handler(virq,
        &bcm6345_l1_irq_chip, handle_percpu_irq);
    irq_set_chip_data(virq, d->host_data);
    irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
    return 0;
}

static const struct irq_domain_ops bcm6345_l1_domain_ops = {
    .xlate          = irq_domain_xlate_onecell,
    .map            = bcm6345_l1_map,
};
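
/*
 * Probe from the device tree: allocate the controller state, set up the
 * register block of each possible CPU, and register a linear IRQ domain
 * covering IRQS_PER_WORD * n_words interrupts. On failure, everything
 * mapped and allocated so far is torn down again.
 */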
static int __init bcm6345_l1_of_init(struct device_node *dn,
                  struct device_node *parent)
{
    struct bcm6345_l1_chip *intc;
    unsigned int idx;
    int ret;

    intc = kzalloc(sizeof(*intc), GFP_KERNEL);
    if (!intc)
        return -ENOMEM;

    for_each_possible_cpu(idx) {
        ret = bcm6345_l1_init_one(dn, idx, intc);
        if (ret)
            pr_err("failed to init intc L1 for cpu %d: %d\n",
                idx, ret);
        else
            cpumask_set_cpu(idx, &intc->cpumask);
    }

    if (cpumask_empty(&intc->cpumask)) {
        ret = -ENODEV;
        goto out_free;
    }

    raw_spin_lock_init(&intc->lock);

    intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
                         &bcm6345_l1_domain_ops,
                         intc);
    if (!intc->domain) {
        ret = -ENOMEM;
        goto out_unmap;
    }

    pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
            IRQS_PER_WORD * intc->n_words);
    for_each_cpu(idx, &intc->cpumask) {
        struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

        pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
                cpu->map_base, cpu->parent_irq);
    }

    return 0;

out_unmap:
    for_each_possible_cpu(idx) {
        struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

        if (cpu) {
            if (cpu->map_base)
                iounmap(cpu->map_base);
            kfree(cpu);
        }
    }
out_free:
    kfree(intc);
    return ret;
}

IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);