0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/smp.h>
0009 #include <linux/irq.h>
0010 #include <linux/irqchip/chained_irq.h>
0011 #include <linux/spinlock.h>
0012 #include <soc/arc/mcip.h>
0013 #include <asm/irqflags-arcv2.h>
0014 #include <asm/setup.h>
0015
0016 static DEFINE_RAW_SPINLOCK(mcip_lock);
0017
0018 #ifdef CONFIG_SMP
0019
0020 static char smp_cpuinfo_buf[128];
0021
0022
0023
0024
0025
/*
 * Add @cpu to the GFRC (Global Free Running Counter) halt mask so the
 * common counter tracks this core too.  Read-modify-write of the core
 * mask is done under mcip_lock since the MCIP command/readback pair is
 * a shared hardware resource.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
	 * GFRC 0x3 version; on older revisions there is nothing to program.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Read current core mask, OR in this cpu, write it back */
	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
0050
/*
 * Add @cpu to the MCIP debug select/mask registers so it is grouped
 * with the other cores for debug purposes.  Whole sequence runs under
 * mcip_lock because the command/readback registers are shared.
 */
static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * Read the currently selected core set and OR in this cpu, then
	 * write the combined set back.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);

	/*
	 * NOTE(review): the 0xF parameter presumably selects which debug
	 * events/causes the mask applies to -- confirm against the
	 * ARConnect (MCIP) databook.
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
0078
0079 static void mcip_setup_per_cpu(int cpu)
0080 {
0081 struct mcip_bcr mp;
0082
0083 READ_BCR(ARC_REG_MCIP_BCR, mp);
0084
0085 smp_ipi_irq_setup(cpu, IPI_IRQ);
0086 smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
0087
0088
0089 if (mp.gfrc)
0090 mcip_update_gfrc_halt_mask(cpu);
0091
0092
0093 if (mp.dbg)
0094 mcip_update_debug_halt_mask(cpu);
0095 }
0096
/*
 * Send an IPI to @cpu.  A self-IPI is delivered through the core-local
 * softirq line instead of the ARConnect inter-core interrupt unit.
 */
static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* Self-IPI: trigger the local softirq, no MCIP command needed */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending
	 * another one: concurrent IPIs to the same target are coalesced
	 * into the single already-asserted IRQ (the receiver drains all
	 * pending senders in mcip_ipi_clear()).  The status check and the
	 * conditional generate are atomic w.r.t. other senders because
	 * both run under mcip_lock.
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
0123
/*
 * Acknowledge/clear an incoming IPI.  Self-IPIs arrive on SOFTIRQ_IRQ
 * and only need the softirq cleared; real cross-core IPIs must be
 * ACKed back to every core that raised one.
 */
static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI: readback is a bitmask of sender cpus */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1, 2, 4, 8 ... */

	/*
	 * Multiple concurrent IPIs to this core may have been coalesced
	 * into one asserted IRQ, so @cpu can have several bits set.  ACK
	 * each sender individually.  Assumes at least one bit is set --
	 * __ffs(0) would be undefined.
	 */
	do {
		c = __ffs(cpu);			/* 0, 1, 2, 3 ... */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
0154
0155 static void mcip_probe_n_setup(void)
0156 {
0157 struct mcip_bcr mp;
0158
0159 READ_BCR(ARC_REG_MCIP_BCR, mp);
0160
0161 sprintf(smp_cpuinfo_buf,
0162 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
0163 mp.ver, mp.num_cores,
0164 IS_AVAIL1(mp.ipi, "IPI "),
0165 IS_AVAIL1(mp.idu, "IDU "),
0166 IS_AVAIL1(mp.dbg, "DEBUG "),
0167 IS_AVAIL1(mp.gfrc, "GFRC"));
0168
0169 cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
0170 }
0171
/* SMP platform hooks consumed by the generic ARC SMP code */
struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};
0179
0180 #endif
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193 #include <linux/irqchip.h>
0194 #include <linux/of.h>
0195 #include <linux/of_irq.h>
0196
0197
0198
0199
/*
 * Program which cores common irq @cmn_irq may be routed to.
 * Callers (idu_irq_set_affinity) already hold mcip_lock.
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
0204
/*
 * Read-modify-write the IDU mode register of @cmn_irq.  The register
 * packs the distribution mode and trigger level per the bitfield
 * layout below; only the fields whose @set_* flag is true are changed,
 * the other field is preserved.  Callers hold mcip_lock.
 */
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
0222
0223 static void idu_irq_mask_raw(irq_hw_number_t hwirq)
0224 {
0225 unsigned long flags;
0226
0227 raw_spin_lock_irqsave(&mcip_lock, flags);
0228 __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
0229 raw_spin_unlock_irqrestore(&mcip_lock, flags);
0230 }
0231
0232 static void idu_irq_mask(struct irq_data *data)
0233 {
0234 idu_irq_mask_raw(data->hwirq);
0235 }
0236
0237 static void idu_irq_unmask(struct irq_data *data)
0238 {
0239 unsigned long flags;
0240
0241 raw_spin_lock_irqsave(&mcip_lock, flags);
0242 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
0243 raw_spin_unlock_irqrestore(&mcip_lock, flags);
0244 }
0245
0246 static void idu_irq_ack(struct irq_data *data)
0247 {
0248 unsigned long flags;
0249
0250 raw_spin_lock_irqsave(&mcip_lock, flags);
0251 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
0252 raw_spin_unlock_irqrestore(&mcip_lock, flags);
0253 }
0254
0255 static void idu_irq_mask_ack(struct irq_data *data)
0256 {
0257 unsigned long flags;
0258
0259 raw_spin_lock_irqsave(&mcip_lock, flags);
0260 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
0261 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
0262 raw_spin_unlock_irqrestore(&mcip_lock, flags);
0263 }
0264
/*
 * irq_chip .irq_set_affinity hook: route @data->hwirq to the online
 * subset of @cpumask, picking direct-destination mode for a single
 * cpu and round-robin for several.
 */
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* errout if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* First word of the cpumask carries all possible IDU destinations */
	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

	/* ffs == fls iff exactly one bit set: single cpu -> direct delivery */
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	/* Update only the distribution field, keep the trigger level */
	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}
0294
/*
 * irq_chip .irq_set_type hook: program the trigger mode of a common
 * irq.  Only rising-edge and active-high level triggers are accepted.
 */
static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/* Reject any trigger type other than rising edge / high level */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Update only the trigger-level field, keep the distribution mode */
	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}
0317
static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 *   1. When the kernel is not configured with support of SMP.
	 *   2. When the kernel is configured with support of SMP but upper
	 *      interrupt controllers does not support setting of the affinity
	 *      and cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}
0332
/* irq_chip callbacks for common (IDU-distributed) interrupts */
static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_ack		= idu_irq_ack,
	.irq_mask_ack		= idu_irq_mask_ack,
	.irq_enable		= idu_irq_enable,
	.irq_set_type		= idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif

};
0346
0347 static void idu_cascade_isr(struct irq_desc *desc)
0348 {
0349 struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
0350 struct irq_chip *core_chip = irq_desc_get_chip(desc);
0351 irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
0352 irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
0353
0354 chained_irq_enter(core_chip, desc);
0355 generic_handle_domain_irq(idu_domain, idu_hwirq);
0356 chained_irq_exit(core_chip, desc);
0357 }
0358
/* irq_domain .map hook: bind each common irq to the IDU chip, level flow */
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	/* Affinity changes may be applied directly from process context */
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}
0366
/* Domain ops: DT specifiers decoded by the generic one/two-cell translator */
static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onetwocell,
	.map	= idu_irq_map,
};
0371
0372
0373
0374
0375
0376
0377
0378
0379 static int __init
0380 idu_of_init(struct device_node *intc, struct device_node *parent)
0381 {
0382 struct irq_domain *domain;
0383 int nr_irqs;
0384 int i, virq;
0385 struct mcip_bcr mp;
0386 struct mcip_idu_bcr idu_bcr;
0387
0388 READ_BCR(ARC_REG_MCIP_BCR, mp);
0389
0390 if (!mp.idu)
0391 panic("IDU not detected, but DeviceTree using it");
0392
0393 READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
0394 nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
0395
0396 pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);
0397
0398 domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
0399
0400
0401
0402 for (i = 0; i < nr_irqs; i++) {
0403
0404 idu_irq_mask_raw(i);
0405
0406
0407
0408
0409
0410
0411
0412 virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
0413 BUG_ON(!virq);
0414 irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
0415 }
0416
0417 __mcip_cmd(CMD_IDU_ENABLE, 0);
0418
0419 return 0;
0420 }
0421 IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);