// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

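/*
 * The hypervisor provides the guest with one or more ranges of hardware
 * interrupt numbers (LISNs). Each range is tracked by a xive_irq_bitmap
 * and interrupt numbers are handed out from those bitmaps.
 */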
struct xive_irq_bitmap {
	unsigned long *bitmap;
	unsigned int base;
	unsigned int count;
	spinlock_t lock;
	struct list_head list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int __init xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static void xive_irq_bitmap_remove_all(void)
{
	struct xive_irq_bitmap *xibm, *tmp;

	list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
		list_del(&xibm->list);
		bitmap_free(xibm->bitmap);
		kfree(xibm);
	}
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

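/*
 * The H_INT_* hypervisor calls below can report a "busy" condition. These
 * helpers convert such return codes into a delay (in ms), and the callers
 * retry the hcall until the hypervisor stops reporting busy.
 */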
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10;
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}

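/*
 * H_INT_RESET resets the partition's XIVE interrupt controller. It can take
 * a while to complete, so it is retried as long as the hypervisor reports a
 * busy condition.
 */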
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
		 lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

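/*
 * H_INT_SET_SOURCE_CONFIG flags. PAPR uses IBM bit numbering, where bit 0
 * is the most significant bit of the 64-bit word, hence the (63 - n) shifts.
 */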
#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK (1ull << (63 - 63))

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio = retbuf[1];
	*sw_irq = retbuf[2];

	pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
		 target, priority, retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

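/*
 * Source flags returned by H_INT_GET_SOURCE_INFO (IBM bit numbering,
 * bit 0 being the MSB of the 64-bit flags word).
 */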
#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60))
#define XIVE_SRC_LSI (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI (1ull << (63 - 63))

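/*
 * Query the source information from the hypervisor and map the ESB pages
 * the XIVE core needs to manage the interrupt.
 */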
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/* There is no chip id on sPAPR; the core picks a target without it */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, ESB management is done through
	 * the H_INT_ESB hcall instead of MMIO, so there is nothing to map.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* The EOI page also supports triggering when XIVE_SRC_TRIGGER is set */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

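/*
 * Route an interrupt source to a target CPU and priority. The
 * XIVE_SRC_SET_EISN flag also sets the EISN (the number written to the
 * event queue) to the Linux IRQ number.
 */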
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}

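/* This can be called multiple times to change the configuration of a queue */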
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the queue fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* The ESn page reported by the hypervisor is used as the queue EOI page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
		/* Secure guests must share the queue page with the hypervisor */
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
				      1 << xive_alloc_order(order));
	}
fail:
	return rc;
}

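/*
 * Allocate an event queue page for the given CPU and priority and configure
 * it through H_INT_SET_QUEUE_CONFIG. The cleanup path resets the queue in
 * the hypervisor and frees the page.
 */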
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	/* Unshare the page from the hypervisor before freeing it in a secure guest */
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* The sPAPR backend accepts any XIVE controller node */
	return true;
}

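/*
 * IPIs are not tied to dedicated hardware on sPAPR: a regular interrupt
 * number is allocated from the LISN bitmaps and used as the per-CPU IPI.
 */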
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

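/*
 * Perform an ACK cycle on the OS ring of the TIMA, then update the pending
 * priority bitmap and the cached CPPR of this CPU from the result.
 */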
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle. The raw I/O
	 * accessor is sufficient here; the barrier below orders the
	 * subsequent queue accesses.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/* The ACK value holds the CPPR in the low byte and the NSR in the high byte */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		/* A CPPR of 0xff means the interrupt window is masked */
		if (cppr == 0xff)
			return;

		/* Mark the priority as pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never be less favored than the
		 * current CPPR.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update the cached CPPR */
		xc->cppr = cppr;
	}
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug output of the OS ring of the TIMA */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Sync the interrupt source through the hypervisor */
	plpar_int_sync(0, hw_irq);
}

static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
	struct xive_irq_bitmap *xibm;
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		memset(buf, 0, PAGE_SIZE);
		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
	}
	kfree(buf);

	return 0;
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data = xive_spapr_populate_irq_data,
	.configure_irq = xive_spapr_configure_irq,
	.get_irq_config = xive_spapr_get_irq_config,
	.setup_queue = xive_spapr_setup_queue,
	.cleanup_queue = xive_spapr_cleanup_queue,
	.match = xive_spapr_match,
	.shutdown = xive_spapr_shutdown,
	.update_pending = xive_spapr_update_pending,
	.setup_cpu = xive_spapr_setup_cpu,
	.teardown_cpu = xive_spapr_teardown_cpu,
	.sync_source = xive_spapr_sync_source,
	.esb_rw = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi = xive_spapr_get_ipi,
	.put_ipi = xive_spapr_put_ipi,
	.debug_show = xive_spapr_debug_show,
#endif
	.name = "spapr",
};

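/*
 * The "ibm,plat-res-int-priorities" property lists the interrupt priority
 * ranges reserved by the hypervisor. Scan priorities 0-7 and return the
 * highest priority number that is not reserved.
 */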
static bool __init xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	of_node_put(rootdn);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/*
	 * The property is a list of (base, range) pairs of reserved
	 * priorities. Keep the highest priority number (0-7) that falls
	 * in none of the reserved ranges.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

static const u8 *__init get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}

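/*
 * XIVE is disabled on the command line (xive=off) unless the hypervisor
 * negotiated "XIVE exploitation only" mode, in which case the option is
 * ignored.
 */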
static bool __init xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE exploitation mode */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}

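/*
 * Probe the "ibm,power-ivpe" device tree node, map the TIMA, feed the IRQ
 * number allocator with the LISN ranges, pick a queue size and initialize
 * the XIVE core with the sPAPR backend.
 */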
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i, err;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring of the thread management area (TIMA) */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	if (!xive_get_max_prio(&max_prio))
		goto err_unmap;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
		err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
					  be32_to_cpu(reg[1]));
		if (err < 0)
			goto err_mem_free;
	}

	/* Iterate the supported EQ sizes and prefer a PAGE_SHIFT-sized queue */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize the XIVE core with the sPAPR backend */
	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		goto err_mem_free;

	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

err_mem_free:
	xive_irq_bitmap_remove_all();
err_unmap:
	iounmap(tima);
err_put:
	of_node_put(np);
	return false;
}

machine_arch_initcall(pseries, xive_core_debug_init);