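/* irq.c: UltraSPARC (sun4u/sun4v) IRQ handling, initialization and
 * registry.
 */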
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
#include <asm/softirq_stack.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

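/* The ino_bucket fields are accessed through physical addresses
 * (ASI_PHYS_USE_EC loads/stores), so vector-trap-time code can walk
 * the chains without relying on virtual mappings.
 */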
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa %%g0, [%0] %1"
			     :
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     :
			     : "r" (irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

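/* "hvirq=<major>" on the command line overrides the HV_GRP_INTR major
 * version requested from the hypervisor in irq_init_hv().
 */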
static unsigned long hvirq_major __initdata;
static int __init early_hvirq_major(char *p)
{
	return kstrtoul(p, 10, &hvirq_major);
}
early_param("hvirq", early_hvirq_major);

static int hv_irq_version;
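/* From major version 3 onward the hypervisor delivers device
 * interrupts exclusively via cookies, so no sysino-based
 * ivector_table is required on those systems.
 */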
static bool sun4v_cookie_only_virqs(void)
{
	return hv_irq_version >= 3;
}

static void __init irq_init_hv(void)
{
	unsigned long hv_error, major, minor = 0;

	if (tlb_type != hypervisor)
		return;

	if (hvirq_major)
		major = hvirq_major;
	else
		major = 3;

	hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
	if (!hv_error)
		hv_irq_version = major;
	else
		hv_irq_version = 1;

	pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
		hv_irq_version,
		sun4v_cookie_only_virqs() ? "enabled" : "disabled");
}

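/* One IRQ is pre-allocated: the timer (see init_IRQ()). */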
int __init arch_probe_nr_irqs(void)
{
	return 1;
}

#define DEFAULT_NUM_IVECS	(0xfffU)
static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
#define NUM_IVECS		(nr_ivec)

static unsigned int __init size_nr_ivec(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
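		/* SPARC64-X (Athena) has a larger devhandle/devino space. */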
		case SUN4V_CHIP_SPARC64X:
			nr_ivec = 0xffff;
			break;
		}
	}
	return nr_ivec;
}

struct irq_handler_data {
	union {
		struct {
			unsigned int dev_handle;
			unsigned int dev_ino;
		};
		unsigned long sysino;
	};
	struct ino_bucket bucket;
	unsigned long iclr;
	unsigned long imap;
};

static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_handle;
}

static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->dev_ino;
}

static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);

	return ihd->sysino;
}

void irq_free(unsigned int irq)
{
	void *data = irq_get_handler_data(irq);

	kfree(data);
	irq_set_handler_data(irq, NULL);
	irq_free_descs(irq, 1);
}

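/* dev_handle and dev_ino are currently unused; descriptors are simply
 * allocated on the local NUMA node.
 */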
unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
	int irq;

	irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL);
	if (irq <= 0)
		goto out;

	return irq;
out:
	return 0;
}

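/* The hypervisor hands back the cookie planted by cookie_assign(): the
 * bitwise complement of an ino_bucket's physical address, which always
 * has the top bit set.
 */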
static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
{
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	unsigned int irq = 0U;

	hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
	if (hv_err) {
		pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
		goto out;
	}

	if (cookie & (1UL << 63UL)) {
		cookie = ~cookie;
		bucket = (struct ino_bucket *) __va(cookie);
		irq = bucket->__irq;
	}
out:
	return irq;
}

static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
	struct ino_bucket *bucket;
	unsigned int irq;

	bucket = &ivector_table[sysino];
	irq = bucket_get_irq(__pa(bucket));

	return irq;
}

void ack_bad_irq(unsigned int irq)
{
	pr_crit("BAD IRQ ack %d\n", irq);
}

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	pr_warn("IRQ pre handler NOT supported.\n");
}
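/* /proc/interrupts printing: architecture-specific rows (NMI counts). */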
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "NMI: ");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
	seq_printf(p, " Non-maskable interrupts\n");
	return 0;
}

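/* Compute the interrupt target ID to program into an IMAP register.
 * The encoding differs across Starfire, JBUS (Jalapeno/Serrano),
 * Safari (other Cheetahs) and UPA systems.
 */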
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpumask_equal(&mask, cpu_online_mask)) {
		cpuid = map_to_cpu(irq);
	} else {
		cpumask_t tmp;

		cpumask_and(&tmp, cpu_online_mask, &mask);
		cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(irq, affinity) \
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq,
				       irq_data_get_affinity_mask(data));
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}
}

static int sun4u_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(data->irq, mask);
		imap = handler_data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, handler_data->iclr);
	}

	return 0;
}
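/* Disabling a sun4u interrupt is intentionally a no-op here; the IMAP
 * is reprogrammed by ->irq_enable() the next time the interrupt is
 * enabled.
 */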
static void sun4u_irq_disable(struct irq_data *data)
{
}

static void sun4u_irq_eoi(struct irq_data *data)
{
	struct irq_handler_data *handler_data;

	handler_data = irq_data_get_irq_handler_data(data);
	if (likely(handler_data))
		upa_writeq(ICLR_IDLE, handler_data->iclr);
}

static void sun4v_irq_enable(struct irq_data *data)
{
	unsigned long cpuid = irq_choose_cpu(data->irq,
					     irq_data_get_affinity_mask(data));
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	unsigned long cpuid = irq_choose_cpu(data->irq, mask);
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(struct irq_data *data)
{
	unsigned int ino = irq_data_to_sysino(data);
	int err;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	unsigned long cpuid;
	int err;

	cpuid = irq_choose_cpu(data->irq, mask);

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(struct irq_data *data)
{
	unsigned long dev_handle = irq_data_to_handle(data);
	unsigned long dev_ino = irq_data_to_ino(data);
	int err;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name			= "sun4u",
	.irq_enable		= sun4u_irq_enable,
	.irq_disable		= sun4u_irq_disable,
	.irq_eoi		= sun4u_irq_eoi,
	.irq_set_affinity	= sun4u_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_irq = {
	.name			= "sun4v",
	.irq_enable		= sun4v_irq_enable,
	.irq_disable		= sun4v_irq_disable,
	.irq_eoi		= sun4v_irq_eoi,
	.irq_set_affinity	= sun4v_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

static struct irq_chip sun4v_virq = {
	.name			= "vsun4v",
	.irq_enable		= sun4v_virq_enable,
	.irq_disable		= sun4v_virq_disable,
	.irq_eoi		= sun4v_virq_eoi,
	.irq_set_affinity	= sun4v_virt_set_affinity,
	.flags			= IRQCHIP_EOI_IF_HANDLED,
};

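/* Build (or look up) the sun4u IRQ for an interrupt source described
 * by its IMAP/ICLR register pair.
 */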
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct irq_handler_data *handler_data;
	struct ino_bucket *bucket;
	unsigned int irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	irq = bucket_get_irq(__pa(bucket));
	if (!irq) {
		irq = irq_alloc(0, ino);
		bucket_set_irq(__pa(bucket), irq);
		irq_set_chip_and_handler_name(irq, &sun4u_irq,
					      handle_fasteoi_irq, "IVEC");
	}

	handler_data = irq_get_handler_data(irq);
	if (unlikely(handler_data))
		goto out;

	handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!handler_data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	irq_set_handler_data(irq, handler_data);

	handler_data->imap = imap;
	handler_data->iclr = iclr;

out:
	return irq;
}

static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
		void (*handler_data_init)(struct irq_handler_data *data,
					  u32 devhandle, unsigned int devino),
		struct irq_chip *chip)
{
	struct irq_handler_data *data;
	unsigned int irq;

	irq = irq_alloc(devhandle, devino);
	if (!irq)
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		pr_err("IRQ handler data allocation failed.\n");
		irq_free(irq);
		irq = 0;
		goto out;
	}

	irq_set_handler_data(irq, data);
	handler_data_init(data, devhandle, devino);
	irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
	data->imap = ~0UL;
	data->iclr = ~0UL;
out:
	return irq;
}

static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
				   unsigned int devino)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	unsigned long hv_error, cookie;

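	/* handler_irq() needs to be able to find this irq: hand the
	 * hypervisor the complemented physical address of our
	 * ino_bucket as the cookie, which cookie_exists() and the
	 * dev-mondo path invert to recover the bucket.
	 */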
	ihd->bucket.__irq = irq;
	cookie = ~__pa(&ihd->bucket);

	hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_error)
		pr_err("HV vintr set cookie failed = %ld\n", hv_error);

	return hv_error;
}

static void cookie_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	data->dev_handle = devhandle;
	data->dev_ino = devino;
}

static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned long hv_error;
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
	if (!irq)
		return 0;

	hv_error = cookie_assign(irq, devhandle, devino);
	if (hv_error) {
		irq_free(irq);
		irq = 0;
	}

	return irq;
}

static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	irq = cookie_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);

out:
	return irq;
}

static void sysino_set_bucket(unsigned int irq)
{
	struct irq_handler_data *ihd = irq_get_handler_data(irq);
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
	BUG_ON(sysino >= nr_ivec);
	bucket = &ivector_table[sysino];
	bucket_set_irq(__pa(bucket), irq);
}

static void sysino_handler_data(struct irq_handler_data *data,
				u32 devhandle, unsigned int devino)
{
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	data->sysino = sysino;
}

static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
				     struct irq_chip *chip)
{
	unsigned int irq;

	irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
	if (!irq)
		goto out;

	sysino_set_bucket(irq);
out:
	return irq;
}

static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = sysino_exists(devhandle, devino);
	if (irq)
		goto out;

	irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
out:
	return irq;
}

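/* Entry point for sun4v device interrupts: cookie-based VIRQs on
 * hypervisor API major 3 and later, classic sysinos otherwise.
 */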
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned int irq;

	if (sun4v_cookie_only_virqs())
		irq = sun4v_build_cookie(devhandle, devino);
	else
		irq = sun4v_build_sysino(devhandle, devino);

	return irq;
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	int irq;

	irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
	if (!irq)
		goto out;

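	/* VIRQs are not enabled automatically at request_irq() time;
	 * the consumer enables them explicitly once it is ready.
	 */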
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

out:
	return irq;
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << pil);

	old_regs = set_irq_regs(regs);
	irq_enter();

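	/* Atomically grab and clear this cpu's IVEC work list head,
	 * with PSTATE_IE cleared so a vector trap cannot race the
	 * load/store pair.
	 */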
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %3, %%pstate\n\t"
			     "ldx [%2], %1\n\t"
			     "stx %%g0, [%2]\n\t"
			     "wrpr %0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		irq = bucket_get_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		generic_handle_irq(irq);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

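/* Run __do_softirq() on the per-cpu softirq stack: leave room for an
 * initial stack frame (192 bytes) and apply the sparc64 STACK_BIAS.
 */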
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
	void *orig_sp, *sp = softirq_stack[smp_processor_id()];

	sp += THREAD_SIZE - 192 - STACK_BIAS;

	__asm__ __volatile__("mov %%sp, %0\n\t"
			     "mov %1, %%sp"
			     : "=&r" (orig_sp)
			     : "r" (sp));
	__do_softirq();
	__asm__ __volatile__("mov %0, %%sp"
			     : : "r" (orig_sp));
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
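/* On cpu offline, retarget every active non-per-cpu interrupt at the
 * remaining online cpus, then shut off the local timer interrupt.
 */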
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data;
		unsigned long flags;

		if (!desc)
			continue;
		data = irq_desc_get_irq_data(desc);
		raw_spin_lock_irqsave(&desc->lock, flags);
		if (desc->action && !irqd_is_per_cpu(data)) {
			if (data->chip->irq_set_affinity)
				data->chip->irq_set_affinity(data,
					irq_data_get_affinity_mask(data),
					false);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

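	/* The PROM timer node hangs off the root of the device tree. */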
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (of_node_name_eq(dp, "counter-timer"))
			break;
		dp = dp->sibling;
	}

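	/* If the node is absent, assume the PROM uses a different tick
	 * mechanism that we do not need to disable.
	 */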
	if (!dp) {
		prom_timers = NULL;
		return;
	}

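	/* If the PROM really uses this timer, the PROM has it mapped. */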
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = NULL;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

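	/* Save the current limit values away for later. */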
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

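	/* Shut the timer off by zeroing its limit registers; the PROM
	 * timer ticks at IRQ 14, the same level as the kernel tick.
	 */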
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

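	/* Also eat any interrupt packet the timer has already sent. */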
	__asm__ __volatile__(
		"	mov	0x40, %%g2\n"
		"	ldxa	[%%g0] %0, %%g1\n"
		"	ldxa	[%%g2] %1, %%g1\n"
		"	stxa	%%g0, [%%g0] %0\n"
		"	membar	#Sync\n"
		:
		: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
		: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}
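/* Each mondo/error queue entry is 64 bytes, so a queue holds
 * (qmask + 1) / 64 entries.  These helpers run during early cpu
 * bringup, hence notrace.
 */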
static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
				       unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}
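/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size, and its base real address must be aligned to the size of the
 * region; an 8KB queue must be 8KB aligned, for example.
 */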
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;
	void *mondo, *p;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);

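	/* Make sure the mondo block is 64-byte aligned. */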
	p = kzalloc(127, GFP_KERNEL);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
		prom_halt();
	}
	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
	tb->cpu_mondo_block_pa = __pa(mondo);

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
		prom_halt();
	}

	tb->cpu_list_pa = __pa(page);
#endif
}
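/* Allocate mondo and error queues for all possible cpus. */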
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

static void __init irq_ivector_init(void)
{
	unsigned long size, order;
	unsigned int ivecs;

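	/* If we are doing cookie-only VIRQs then we do not need the
	 * ivector_table to process interrupts.
	 */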
	if (sun4v_cookie_only_virqs())
		return;

	ivecs = size_nr_ivec();
	size = sizeof(struct ino_bucket) * ivecs;
	order = get_order(size);
	ivector_table = (struct ino_bucket *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);
}
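/* Only invoked on the boot processor. */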
void __init init_IRQ(void)
{
	irq_init_hv();
	irq_ivector_init();
	map_prom_timers();
	kill_prom_timer();

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
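		/* Load up the boot cpu's entries. */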
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

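	/* Clear any IRQs pending in the soft interrupt registers; a
	 * spurious one could have been left behind by the PROM timer
	 * we just disabled.
	 */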
	clear_softint(get_softint());

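	/* Now that the ivector table is initialized, it is safe to
	 * enable interrupt delivery (PSTATE_IE).  Devices left active
	 * by the boot PROM may interrupt right away; those vectors are
	 * simply ignored.
	 */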
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     :
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}