// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/reg_ops.h>

static struct irq_domain *root_domain;
static void __iomem *INTCG_base;
static void __iomem *INTCL_base;

#define IPI_IRQ		15
#define INTC_IRQS	256
#define COMM_IRQ_BASE	32

#define INTCG_SIZE	0x8000
#define INTCL_SIZE	0x1000

#define INTCG_ICTLR	0x0
#define INTCG_CICFGR	0x100
#define INTCG_CIDSTR	0x1000

#define INTCL_PICTLR	0x0
#define INTCL_CFGR	0x14
#define INTCL_SIGR	0x60
#define INTCL_RDYIR	0x6c
#define INTCL_SENR	0xa0
#define INTCL_CENR	0xa4
#define INTCL_CACR	0xb4

static DEFINE_PER_CPU(void __iomem *, intcl_reg);

static unsigned long *__trigger;

#define IRQ_OFFSET(irq)	((irq < COMM_IRQ_BASE) ? irq : (irq - COMM_IRQ_BASE))

/* Each irq has a 2-bit trigger field, packed 16 per 32-bit config word */
#define TRIG_BYTE_OFFSET(i)	((((i) * 2) / 32) * 4)
#define TRIG_BIT_OFFSET(i)	(((i) * 2) % 32)

#define TRIG_VAL(trigger, irq)	(trigger << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
#define TRIG_VAL_MSK(irq)	(~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))

#define TRIG_BASE(irq) \
	(TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + ((irq < COMM_IRQ_BASE) ? \
	(this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))

static DEFINE_SPINLOCK(setup_lock);
static void setup_trigger(unsigned long irq, unsigned long trigger)
{
	unsigned int tmp;

	spin_lock(&setup_lock);

	/* setup trigger: read-modify-write this irq's 2-bit trigger field */
	tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);

	writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));

	spin_unlock(&setup_lock);
}

static void csky_mpintc_handler(struct pt_regs *regs)
{
	void __iomem *reg_base = this_cpu_read(intcl_reg);

	generic_handle_domain_irq(root_domain,
		readl_relaxed(reg_base + INTCL_RDYIR));
}

static void csky_mpintc_unmask(struct irq_data *d)
{
	void __iomem *reg_base = this_cpu_read(intcl_reg);

	setup_trigger(d->hwirq, __trigger[d->hwirq]);

	writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}

static void csky_mpintc_mask(struct irq_data *d)
{
	void __iomem *reg_base = this_cpu_read(intcl_reg);

	writel_relaxed(d->hwirq, reg_base + INTCL_CENR);
}

static void csky_mpintc_eoi(struct irq_data *d)
{
	void __iomem *reg_base = this_cpu_read(intcl_reg);

	writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
}

static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
{
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_HIGH:
		__trigger[d->hwirq] = 0;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		__trigger[d->hwirq] = 1;
		break;
	case IRQ_TYPE_EDGE_RISING:
		__trigger[d->hwirq] = 2;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		__trigger[d->hwirq] = 3;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_SMP
static int csky_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val,
				 bool force)
{
	unsigned int cpu;
	unsigned int offset = 4 * (d->hwirq - COMM_IRQ_BASE);

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The csky,mpintc supports automatic irq delivery, but it can
	 * only deliver an external irq to one cpu or to all cpus; it
	 * cannot deliver an external irq to an arbitrary group of cpus
	 * described by a cpumask.
	 * So we only use auto-delivery mode when the affinity mask_val
	 * equals cpu_present_mask.
	 */
	if (cpumask_equal(mask_val, cpu_present_mask))
		cpu = 0;
	else
		cpu |= BIT(31);

	writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip csky_irq_chip = {
	.name			= "C-SKY SMP Intc",
	.irq_eoi		= csky_mpintc_eoi,
	.irq_unmask		= csky_mpintc_unmask,
	.irq_mask		= csky_mpintc_mask,
	.irq_set_type		= csky_mpintc_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= csky_irq_set_affinity,
#endif
};

static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	if (hwirq < COMM_IRQ_BASE) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &csky_irq_chip,
					 handle_percpu_irq);
	} else {
		irq_set_chip_and_handler(irq, &csky_irq_chip,
					 handle_fasteoi_irq);
	}

	return 0;
}

static int csky_irq_domain_xlate_cells(struct irq_domain *d,
		struct device_node *ctrlr, const u32 *intspec,
		unsigned int intsize, unsigned long *out_hwirq,
		unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;

	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	else
		*out_type = IRQ_TYPE_LEVEL_HIGH;

	return 0;
}

static const struct irq_domain_ops csky_irqdomain_ops = {
	.map	= csky_irqdomain_map,
	.xlate	= csky_irq_domain_xlate_cells,
};

#ifdef CONFIG_SMP
static void csky_mpintc_send_ipi(const struct cpumask *mask)
{
	/*
	 * INTCL_SIGR[3:0]  INTID
	 * INTCL_SIGR[15:8] CPUMASK
	 */
	void __iomem *reg_base = this_cpu_read(intcl_reg);

	writel_relaxed((*cpumask_bits(mask)) << 8 | IPI_IRQ,
		       reg_base + INTCL_SIGR);
}
#endif

/* C-SKY multi processor interrupt controller */
static int __init
csky_mpintc_init(struct device_node *node, struct device_node *parent)
{
	int ret;
	unsigned int cpu, nr_irq;
#ifdef CONFIG_SMP
	unsigned int ipi_irq;
#endif

	if (parent)
		return 0;

	ret = of_property_read_u32(node, "csky,num-irqs", &nr_irq);
	if (ret < 0)
		nr_irq = INTC_IRQS;

	__trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
	if (__trigger == NULL)
		return -ENXIO;

	if (INTCG_base == NULL) {
		INTCG_base = ioremap(mfcr("cr<31, 14>"),
				     INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
		if (INTCG_base == NULL)
			return -EIO;

		INTCL_base = INTCG_base + INTCG_SIZE;

		writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
	}

	root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
					    NULL);
	if (!root_domain)
		return -ENXIO;

	/* Set up and enable each present cpu's local intc block */
	for_each_present_cpu(cpu) {
		per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
		writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
	}

	set_handle_irq(&csky_mpintc_handler);

#ifdef CONFIG_SMP
	ipi_irq = irq_create_mapping(root_domain, IPI_IRQ);
	if (!ipi_irq)
		return -EIO;

	set_send_ipi(&csky_mpintc_send_ipi, ipi_irq);
#endif

	return 0;
}
IRQCHIP_DECLARE(csky_mpintc, "csky,mpintc", csky_mpintc_init);
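
For context, a minimal consumer-side sketch of how a driver might request interrupts that csky_irqdomain_map() sets up above: hwirqs below COMM_IRQ_BASE are configured as per-CPU interrupts (handle_percpu_irq) and are claimed with request_percpu_irq(), while hwirqs at or above COMM_IRQ_BASE take the ordinary request_irq() path. Everything below (demo_handler, demo_request, the virq parameters, the per-CPU cookie) is a hypothetical illustration and not part of this driver; the virq numbers would normally come from platform_get_irq() or irq_of_parse_and_map().

	#include <linux/interrupt.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU cookie for a per-CPU (hwirq < COMM_IRQ_BASE) interrupt */
	static DEFINE_PER_CPU(int, demo_percpu_cookie);

	static irqreturn_t demo_handler(int irq, void *dev_id)
	{
		/* Acknowledge and handle the device here */
		return IRQ_HANDLED;
	}

	/* Hypothetical helper: virq_percpu and virq_shared are Linux virqs mapped
	 * through the csky,mpintc domain, not fixed numbers. */
	static int demo_request(unsigned int virq_percpu, unsigned int virq_shared)
	{
		int ret;

		/* Per-CPU interrupt: the handler runs with a per-CPU dev_id on each cpu */
		ret = request_percpu_irq(virq_percpu, demo_handler, "demo-percpu",
					 &demo_percpu_cookie);
		if (ret)
			return ret;

		/* Shared external interrupt (hwirq >= COMM_IRQ_BASE): normal fasteoi path */
		return request_irq(virq_shared, demo_handler, 0, "demo", NULL);
	}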