// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
bool xive_has_save_restore;

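/*
 * Fetch the ESB characteristics of a HW interrupt from OPAL and map
 * its EOI (and, when distinct, trigger) ESB pages into the kernel.
 */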
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* No trigger page? Nothing left to map */
	if (!data->trig_page)
		return 0;

	/* A single ESB page serves both EOI and trigger, reuse the mapping */
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

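/*
 * Route a HW interrupt to a target at the given priority, retrying
 * while OPAL reports it is busy.
 */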
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

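/* Read back the current target/priority/software IRQ of a HW interrupt */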
static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)

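/*
 * Configure (or reconfigure) an event queue for a VP at the given
 * priority. A typical caller passes a page obtained from
 * xive_queue_page_alloc(), e.g. (illustrative sketch only):
 *
 *	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
 *	if (!IS_ERR(qpage))
 *		rc = xive_native_configure_queue(vp_id, q, prio, qpage,
 *						 xive_queue_shift, false);
 */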
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, get its physical address */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

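/* Reset a queue in HW, retrying while OPAL is busy */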
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

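/* Allocate and configure the event queue page of a CPU at a given priority */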
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * Disable the queue in HW before freeing the queue page, so the
	 * HW can no longer write to the memory being released.
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

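/*
 * Allocate a HW interrupt from the pool of the given chip. Returns the
 * HW IRQ number, or 0 on failure.
 */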
u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);

		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

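/* Return a CPU's IPI to OPAL and mark it invalid in the per-CPU state */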
#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

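/*
 * Acknowledge the hypervisor interrupt on the TIMA and update this
 * CPU's pending priority bitmap accordingly.
 */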
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;

		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	xc->chip_id = cpu_to_chip_id(cpu);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

#ifdef CONFIG_DEBUG_FS
static int xive_native_debug_create(struct dentry *xive_dir)
{
	debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
	return 0;
}
#endif

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.prepare_cpu		= xive_native_prepare_cpu,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_FS
	.debug_create		= xive_native_debug_create,
#endif
	.name			= "native",
};

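/*
 * Parse the device tree provisioning properties: the page size OPAL
 * wants donated and the list of chips that may need such pages.
 */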
static bool __init xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void __init xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation? */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	if (of_get_property(np, "vp-save-restore", NULL))
		xive_has_save_restore = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

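/*
 * Donate one page from the provision cache to OPAL for each chip that
 * may need it, so a pending VP block allocation can proceed.
 */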
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node
		 * where the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		/* The page is handed to firmware, don't report it as a leak */
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	/* Round up to a power-of-two block size */
	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc) {
		vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
		return -EIO;
	}
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

bool xive_native_has_save_restore(void)
{
	return xive_has_save_restore;
}
EXPORT_SYMBOL_GPL(xive_native_has_save_restore);

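/*
 * Query the HW characteristics of a queue; any out pointer may be NULL
 * when the caller doesn't need that field.
 */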
int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be64_to_cpu(qsize);	/* qsize is __be64 */
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
	       opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

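/* Snapshot the OPAL view of a VP's state, e.g. when saving guest state */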
int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		vp_err(vp_id, "failed to get vp state : %lld\n", rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);