// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

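/* Serializes all MCIP commands: they share a single AUX command/readback interface */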
static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set a mask to halt the GFRC if any online core in the SMP cluster is
 * halted. Only works on ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
    struct bcr_generic gfrc;
    unsigned long flags;
    u32 gfrc_halt_mask;

    READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

    /*
     * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
     * GFRC version 0x3.
     */
    if (gfrc.ver < 0x3)
        return;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    __mcip_cmd(CMD_GFRC_READ_CORE, 0);
    gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
    gfrc_halt_mask |= BIT(cpu);
    __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

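/*
 * Enroll @cpu in ARConnect "gang halting": when any selected core halts
 * (STATUS32[H], actionpoint, breakpoint or self-halt), all selected cores
 * halt together, keeping the cluster consistent under a debugger.
 */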
static void mcip_update_debug_halt_mask(int cpu)
{
    u32 mcip_mask = 0;
    unsigned long flags;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    /*
     * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
     * CMD_DEBUG_SET_MASK commands, so read it once instead of reading both
     * CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
     */
    __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
    mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

    mcip_mask |= BIT(cpu);

    __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
    /*
     * The parameter specifies the halt causes:
     * STATUS32[H]/actionpoint/breakpoint/self-halt
     * We select all of them (0xF).
     */
    __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

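/*
 * Per-CPU bringup: hook up the two IPI interrupt lines and enroll the new
 * core in the GFRC and debug halt masks (when those units are built in).
 */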
static void mcip_setup_per_cpu(int cpu)
{
    struct mcip_bcr mp;

    READ_BCR(ARC_REG_MCIP_BCR, mp);

    smp_ipi_irq_setup(cpu, IPI_IRQ);
    smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

    /* Update GFRC halt mask as new CPU came online */
    if (mp.gfrc)
        mcip_update_gfrc_halt_mask(cpu);

    /* Update MCIP debug mask as new CPU came online */
    if (mp.dbg)
        mcip_update_debug_halt_mask(cpu);
}

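/*
 * Send an Inter Core Interrupt to @cpu via ARConnect. The unit cannot
 * self-IPI, so the local case is emulated with the core-private software
 * triggered interrupt (SOFTIRQ_IRQ).
 */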
static void mcip_ipi_send(int cpu)
{
    unsigned long flags;
    int ipi_was_pending;

    /* ARConnect can only send IPIs to other cores */
    if (unlikely(cpu == raw_smp_processor_id())) {
        arc_softirq_trigger(SOFTIRQ_IRQ);
        return;
    }

    raw_spin_lock_irqsave(&mcip_lock, flags);

    /*
     * If the receiver already has a pending interrupt, elide sending this
     * one: Linux cross-core calling copes fine with concurrent IPIs
     * coalesced into one (see arch/arc/kernel/smp.c: ipi_send_msg_one()).
     */
    __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
    ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
    if (!ipi_was_pending)
        __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

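/*
 * Called from the IPI handler: find out which core(s) interrupted us and
 * ACK them in the ARConnect intercore interrupt unit.
 */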
static void mcip_ipi_clear(int irq)
{
    unsigned int cpu, c;
    unsigned long flags;

    if (unlikely(irq == SOFTIRQ_IRQ)) {
        arc_softirq_clear(irq);
        return;
    }

    raw_spin_lock_irqsave(&mcip_lock, flags);

    /* Who sent the IPI */
    __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

    cpu = read_aux_reg(ARC_REG_MCIP_READBACK);  /* 1,2,4,8... */

    /*
     * In the rare case that multiple concurrent IPIs sent to the same
     * target get coalesced by MCIP into one asserted IRQ, @cpu can be
     * "vectored" (multiple bits set) as opposed to the typical single bit.
     */
    do {
        c = __ffs(cpu);         /* 0,1,2,3 */
        __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
        cpu &= ~(1U << c);
    } while (cpu);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

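/*
 * Read the ARConnect build config register and compose the /proc/cpuinfo
 * blurb listing the features present (IPI, IDU, DEBUG, GFRC).
 */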
static void mcip_probe_n_setup(void)
{
    struct mcip_bcr mp;

    READ_BCR(ARC_REG_MCIP_BCR, mp);

    sprintf(smp_cpuinfo_buf,
        "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
        mp.ver, mp.num_cores,
        IS_AVAIL1(mp.ipi, "IPI "),
        IS_AVAIL1(mp.idu, "IDU "),
        IS_AVAIL1(mp.dbg, "DEBUG "),
        IS_AVAIL1(mp.gfrc, "GFRC"));

    cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

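/* Hook ARConnect into the generic ARC platform SMP operations */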
struct plat_smp_ops plat_smp_ops = {
    .info       = smp_cpuinfo_buf,
    .init_early_smp = mcip_probe_n_setup,
    .init_per_cpu   = mcip_setup_per_cpu,
    .ipi_send   = mcip_ipi_send,
    .ipi_clear  = mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to the core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (Round Robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
    __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

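/*
 * Read-modify-write the mode of @cmn_irq: trigger (level/edge) and/or
 * distribution (fixed destination vs Round Robin), touching only the
 * fields selected by @set_lvl / @set_distr.
 */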
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
             bool set_distr, unsigned int distr)
{
    union {
        unsigned int word;
        struct {
            unsigned int distr:2, pad:2, lvl:1, pad2:27;
        };
    } data;

    data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
    if (set_distr)
        data.distr = distr;
    if (set_lvl)
        data.lvl = lvl;
    __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

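/*
 * irq_chip callbacks: each one funnels into MCIP commands, which go through
 * the shared AUX command interface and must therefore be serialized with
 * mcip_lock.
 */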
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&mcip_lock, flags);
    __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
    idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&mcip_lock, flags);
    __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&mcip_lock, flags);
    __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&mcip_lock, flags);
    __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
    __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
    raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

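/*
 * Route @data->hwirq to the online CPUs in @cpumask: a single destination
 * bit (ffs == fls) pins the IRQ to that core; multiple bits enable hardware
 * Round Robin distribution across them.
 */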
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
             bool force)
{
    unsigned long flags;
    cpumask_t online;
    unsigned int destination_bits;
    unsigned int distribution_mode;

    /* Error out if @cpumask contains no online CPU */
    if (!cpumask_and(&online, cpumask, cpu_online_mask))
        return -EINVAL;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    destination_bits = cpumask_bits(&online)[0];
    idu_set_dest(data->hwirq, destination_bits);

    if (ffs(destination_bits) == fls(destination_bits))
        distribution_mode = IDU_M_DISTRI_DEST;
    else
        distribution_mode = IDU_M_DISTRI_RR;

    idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);

    return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
    unsigned long flags;

    /*
     * ARCv2 IDU HW does not support inverse polarity, so these are the
     * only interrupt types supported.
     */
    if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
        return -EINVAL;

    raw_spin_lock_irqsave(&mcip_lock, flags);

    idu_set_mode(data->hwirq, true,
             type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
                           IDU_M_TRIG_LEVEL,
             false, 0);

    raw_spin_unlock_irqrestore(&mcip_lock, flags);

    return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
    /*
     * By default send all common interrupts to all available online CPUs.
     * The affinity of common interrupts in the IDU must be set manually
     * since in some cases the kernel will not call irq_set_affinity() by
     * itself:
     *   1. When the kernel is not configured with SMP support.
     *   2. When the kernel is configured with SMP support but the upper
     *      interrupt controllers do not support setting affinity and
     *      cannot propagate it to the IDU.
     */
    idu_irq_set_affinity(data, cpu_online_mask, false);
    idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
    .name           = "MCIP IDU Intc",
    .irq_mask       = idu_irq_mask,
    .irq_unmask     = idu_irq_unmask,
    .irq_ack        = idu_irq_ack,
    .irq_mask_ack       = idu_irq_mask_ack,
    .irq_enable     = idu_irq_enable,
    .irq_set_type       = idu_irq_set_type,
#ifdef CONFIG_SMP
    .irq_set_affinity   = idu_irq_set_affinity,
#endif
};

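/*
 * First-level handler for the core intc inputs behind the IDU: translate
 * the core hwirq to the IDU common irq and hand off to the IDU domain.
 */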
static void idu_cascade_isr(struct irq_desc *desc)
{
    struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
    struct irq_chip *core_chip = irq_desc_get_chip(desc);
    irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
    irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

    chained_irq_enter(core_chip, desc);
    generic_handle_domain_irq(idu_domain, idu_hwirq);
    chained_irq_exit(core_chip, desc);
}

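/*
 * Domain .map: bind each common IRQ to the IDU irq_chip with a level flow
 * handler; IRQ_MOVE_PCNTXT lets affinity changes be applied from process
 * context.
 */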
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
    irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
    irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

    return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
    .xlate  = irq_domain_xlate_onetwocell,
    .map    = idu_irq_map,
};

/*
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]:  Not statically assigned, private-per-core
 */

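/*
 * Probe/init from DeviceTree. A typical binding looks like the sketch
 * below (node and label names are illustrative; the compatible string is
 * the one registered at the bottom of this file):
 *
 *    idu_intc: idu-interrupt-controller {
 *        compatible = "snps,archs-idu-intc";
 *        interrupt-controller;
 *        #interrupt-cells = <1>;    // or <2> to also encode trigger type
 *    };
 *
 * Consumers then specify common IRQ numbers counted from 0, which this
 * driver cascades off core intc inputs FIRST_EXT_IRQ and up.
 */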
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
    struct irq_domain *domain;
    int nr_irqs;
    int i, virq;
    struct mcip_bcr mp;
    struct mcip_idu_bcr idu_bcr;

    READ_BCR(ARC_REG_MCIP_BCR, mp);

    if (!mp.idu)
        panic("IDU not detected, but DeviceTree uses it");

    READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
    nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

    pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

    domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

    /* Parent interrupts (core-intc) are already mapped */

    for (i = 0; i < nr_irqs; i++) {
        /* Mask all common interrupts by default */
        idu_irq_mask_raw(i);

        /*
         * Map the parent uplink IRQs (towards the core intc), i.e. 24,
         * 25, ... This mapping already exists; we repeat it only to get
         * the parent virq so the IDU cascade handler can be installed as
         * the first-level ISR.
         */
        virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
        BUG_ON(!virq);
        irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
    }

    __mcip_cmd(CMD_IDU_ENABLE, 0);

    return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);