// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

struct hub_irq_data {
	u64	*irq_mask[2];
	cpuid_t	cpu;
};

static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);

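/*
 * Allocate a free software interrupt level from the shared hub_irq_map.
 * find_first_zero_bit() and test_and_set_bit() are not an atomic pair,
 * so retry if another CPU claimed the level in between.
 */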
static inline int alloc_level(void)
{
	int level;

again:
	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
	if (level >= IP27_HUB_IRQ_COUNT)
		return -ENOSPC;

	if (test_and_set_bit(level, hub_irq_map))
		goto again;

	return level;
}

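/*
 * Mask/unmask helpers: update this interrupt's bit in the per-cpu
 * enable mask and write both 64-bit words back to the target CPU's
 * hub interrupt mask registers.
 */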
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}

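/*
 * Route an interrupt to the first online CPU in @mask (any online CPU
 * as a fallback) and point irq_mask[] at that CPU's mask registers;
 * hub slices A and B have separate register sets.
 */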
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}

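/*
 * To change the affinity of a live interrupt, mask it at its current
 * CPU, rewrite the routing, then unmask it at the new CPU.
 */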
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};

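/*
 * Allocate a single hub interrupt: reserve a software level for it,
 * attach the HUB irq_chip, route it to a CPU attached to the
 * requesting node's hub and clear any stale pending state.
 */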
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}

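/*
 * Free the per-interrupt chip data allocated in hub_domain_alloc().
 */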
static void hub_domain_free(struct irq_domain *domain,
			    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd && irqd->chip_data)
		kfree(irqd->chip_data);
}

static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};

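/*
 * Chained handlers for the hub's two interrupt pending registers.
 * PI_INT_PEND0 carries hardware levels 0-63, including the SMP
 * resched/call IPIs handled inline below; PI_INT_PEND1 carries
 * levels 64-127.  Only interrupts enabled in this CPU's mask are
 * considered.
 */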
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int ret;

	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];	/* pick the intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend0));
		if (ret)
			spurious_interrupt();
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}

static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int ret;

	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];	/* pick the intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
	if (ret)
		spurious_interrupt();

	LOCAL_HUB_L(PI_INT_PEND1);
}

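/*
 * Called on each CPU as it comes up: enable the resched and call IPIs
 * for this CPU's hub slice and load the result into the slice's
 * PI_INT_MASK registers.
 */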
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}

void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	int i;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= CPU_CALL_B_IRQ; i++)
		set_bit(i, hub_irq_map);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
		set_bit(i, hub_irq_map);

	fn = irq_domain_alloc_named_fwnode("HUB");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}