// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/irqdomain.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"


#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22	/* Each SPA is 4 MB */

#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000

struct ocxl_link;

struct pe_data {
        struct mm_struct *mm;
        /* callback to trigger when a translation fault occurs */
        void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
        /* opaque pointer passed to the above callback */
        void *xsl_err_data;
        struct rcu_head rcu;
        struct ocxl_link *link;
        struct mmu_notifier mmu_notifier;
};

struct spa {
        struct ocxl_process_element *spa_mem;
        int spa_order;
        struct mutex spa_lock;
        struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
        char *irq_name;
        int virq;
        void __iomem *reg_dsisr;
        void __iomem *reg_dar;
        void __iomem *reg_tfc;
        void __iomem *reg_pe_handle;
        /*
         * The following fields are used by the memory fault
         * interrupt handler. We can only have one interrupt for an
         * AFU link at any time, so serializing the handling is not
         * an issue.
         */
        struct xsl_fault {
                struct work_struct fault_work;
                u64 pe;
                u64 dsisr;
                u64 dar;
                struct pe_data pe_data;
        } xsl_fault;
};

/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed.
 */
struct ocxl_link {
        struct list_head list;
        struct kref ref;
        int domain;
        int bus;
        int dev;
        void __iomem *arva;     /* ATSD register virtual address */
        spinlock_t atsd_lock;   /* to serialize shootdowns */
        atomic_t irq_available;
        struct spa *spa;
        void *platform_data;
};
static LIST_HEAD(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
        CONTINUE,
        ADDRESS_ERROR,
        RESTART,
};


static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
        u64 reg;

        *dsisr = in_be64(spa->reg_dsisr);
        *dar = in_be64(spa->reg_dar);
        reg = in_be64(spa->reg_pe_handle);
        *pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
        u64 reg = 0;

        /* continue is not supported */
        if (r == RESTART)
                reg = PPC_BIT(31);
        else if (r == ADDRESS_ERROR)
                reg = PPC_BIT(30);
        else
                WARN(1, "Invalid irq response %d\n", r);

        if (reg) {
                trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
                                spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
                out_be64(spa->reg_tfc, reg);
        }
}

static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
        vm_fault_t flt = 0;
        unsigned long access, flags, inv_flags = 0;
        enum xsl_response r;
        struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
                                        fault_work);
        struct spa *spa = container_of(fault, struct spa, xsl_fault);

        int rc;

        /*
         * We must release the reference on mm_users whenever exiting
         * this function (it was taken in the memory fault interrupt
         * handler).
         */
        rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
                                &flt);
        if (rc) {
                pr_debug("copro_handle_mm_fault failed: %d\n", rc);
                if (fault->pe_data.xsl_err_cb) {
                        fault->pe_data.xsl_err_cb(
                                fault->pe_data.xsl_err_data,
                                fault->dar, fault->dsisr);
                }
                r = ADDRESS_ERROR;
                goto ack;
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash
                 * entry since current->trap is not a page fault, so
                 * call hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (fault->dsisr & SPA_XSL_S)
                        access |= _PAGE_WRITE;

                if (get_region_id(fault->dar) != USER_REGION_ID)
                        access |= _PAGE_PRIVILEGED;

                local_irq_save(flags);
                hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
                        inv_flags);
                local_irq_restore(flags);
        }
        r = RESTART;
ack:
        mmput(fault->pe_data.mm);
        ack_irq(spa, r);
}

static irqreturn_t xsl_fault_handler(int irq, void *data)
{
        struct ocxl_link *link = (struct ocxl_link *) data;
        struct spa *spa = link->spa;
        u64 dsisr, dar, pe_handle;
        struct pe_data *pe_data;
        struct ocxl_process_element *pe;
        int pid;
        bool schedule = false;

        read_irq(spa, &dsisr, &dar, &pe_handle);
        trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

        WARN_ON(pe_handle > SPA_PE_MASK);
        pe = spa->spa_mem + pe_handle;
        pid = be32_to_cpu(pe->pid);
        /* We could be reading all null values here if the PE is being
         * removed while an interrupt kicks in. It's not supposed to
         * happen if the driver notified the AFU to terminate the
         * PASID, and the AFU waited for pending operations before
         * acknowledging. But even if it happens, we won't find a
         * memory context below and fail silently, so it should be ok.
         */
        if (!(dsisr & SPA_XSL_TF)) {
                WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }

        rcu_read_lock();
        pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
        if (!pe_data) {
                /*
                 * Could only happen if the driver didn't notify the
                 * AFU about PASID termination before removing the PE,
                 * or the AFU didn't wait for all memory accesses to
                 * have completed.
                 *
                 * Either way, we fail early, but we shouldn't log an
                 * error message, as it is a valid (if unexpected)
                 * scenario.
                 */
                rcu_read_unlock();
                pr_debug("Unknown mm context for xsl interrupt\n");
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }

        if (!pe_data->mm) {
                /*
                 * translation fault from a kernel context - an OpenCAPI
                 * device tried to access a bad kernel address
                 */
                rcu_read_unlock();
                pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }
        WARN_ON(pe_data->mm->context.id != pid);

        if (mmget_not_zero(pe_data->mm)) {
                spa->xsl_fault.pe = pe_handle;
                spa->xsl_fault.dar = dar;
                spa->xsl_fault.dsisr = dsisr;
                spa->xsl_fault.pe_data = *pe_data;
                schedule = true;
                /* mm_users count released by bottom half */
        }
        rcu_read_unlock();
        if (schedule)
                schedule_work(&spa->xsl_fault.fault_work);
        else
                ack_irq(spa, ADDRESS_ERROR);
        return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
        pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
                                spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
        return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
                                &spa->reg_tfc, &spa->reg_pe_handle);
}

static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
        struct spa *spa = link->spa;
        int rc;
        int hwirq;

        rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
        if (rc)
                return rc;

        rc = map_irq_registers(dev, spa);
        if (rc)
                return rc;

        spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
                                link->domain, link->bus, link->dev);
        if (!spa->irq_name) {
                dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
                rc = -ENOMEM;
                goto err_xsl;
        }
        /*
         * At some point, we'll need to look into allowing a higher
         * number of interrupts. Could we have an IRQ domain per link?
         */
        spa->virq = irq_create_mapping(NULL, hwirq);
        if (!spa->virq) {
                dev_err(&dev->dev,
                        "irq_create_mapping failed for translation interrupt\n");
                rc = -EINVAL;
                goto err_name;
        }

        dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

        rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
                        link);
        if (rc) {
                dev_err(&dev->dev,
                        "request_irq failed for translation interrupt: %d\n",
                        rc);
                rc = -EINVAL;
                goto err_mapping;
        }
        return 0;

err_mapping:
        irq_dispose_mapping(spa->virq);
err_name:
        kfree(spa->irq_name);
err_xsl:
        unmap_irq_registers(spa);
        return rc;
}

static void release_xsl_irq(struct ocxl_link *link)
{
        struct spa *spa = link->spa;

        if (spa->virq) {
                free_irq(spa->virq, link);
                irq_dispose_mapping(spa->virq);
        }
        kfree(spa->irq_name);
        unmap_irq_registers(spa);
}

static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
        struct spa *spa;

        spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
        if (!spa)
                return -ENOMEM;

        mutex_init(&spa->spa_lock);
        INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
        INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

        spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
        spa->spa_mem = (struct ocxl_process_element *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
        if (!spa->spa_mem) {
                dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
                kfree(spa);
                return -ENOMEM;
        }
        pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
                link->dev, spa->spa_mem);

        link->spa = spa;
        return 0;
}

static void free_spa(struct ocxl_link *link)
{
        struct spa *spa = link->spa;

        pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
                link->dev);

        if (spa && spa->spa_mem) {
                free_pages((unsigned long) spa->spa_mem, spa->spa_order);
                kfree(spa);
                link->spa = NULL;
        }
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
        struct ocxl_link *link;
        int rc;

        link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        kref_init(&link->ref);
        link->domain = pci_domain_nr(dev->bus);
        link->bus = dev->bus->number;
        link->dev = PCI_SLOT(dev->devfn);
        atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
        spin_lock_init(&link->atsd_lock);

        rc = alloc_spa(dev, link);
        if (rc)
                goto err_free;

        rc = setup_xsl_irq(dev, link);
        if (rc)
                goto err_spa;

        /* platform specific hook */
        rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
                                &link->platform_data);
        if (rc)
                goto err_xsl_irq;

        /*
         * If link->arva is not set up, the MMIO registers are not used
         * to generate TLB invalidates and PowerBus snooping stays
         * enabled. Otherwise, snooping is disabled and TLB invalidates
         * are initiated through the MMIO registers.
         */
        pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva);

        *out_link = link;
        return 0;

err_xsl_irq:
        release_xsl_irq(link);
err_spa:
        free_spa(link);
err_free:
        kfree(link);
        return rc;
}

static void free_link(struct ocxl_link *link)
{
        release_xsl_irq(link);
        free_spa(link);
        kfree(link);
}

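/*
 * Find the ocxl_link for an OpenCAPI function, creating it on first use,
 * and take a reference on it. All functions behind the same device slot
 * (domain/bus/slot) share one link. The opaque handle is returned in
 * *link_handle and must be released with ocxl_link_release().
 */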
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
        int rc = 0;
        struct ocxl_link *link;

        mutex_lock(&links_list_lock);
        list_for_each_entry(link, &links_list, list) {
                /* The functions of a device all share the same link */
                if (link->domain == pci_domain_nr(dev->bus) &&
                        link->bus == dev->bus->number &&
                        link->dev == PCI_SLOT(dev->devfn)) {
                        kref_get(&link->ref);
                        *link_handle = link;
                        goto unlock;
                }
        }
        rc = alloc_link(dev, PE_mask, &link);
        if (rc)
                goto unlock;

        list_add(&link->list, &links_list);
        *link_handle = link;
unlock:
        mutex_unlock(&links_list_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
        struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);

        if (link->arva) {
                pnv_ocxl_unmap_lpar(link->arva);
                link->arva = NULL;
        }

        list_del(&link->list);
        /* call platform code before releasing data */
        pnv_ocxl_spa_release(link->platform_data);
        free_link(link);
}

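/*
 * Drop the reference taken by ocxl_link_setup(). When the last user goes
 * away, the link is removed from the global list and freed, releasing the
 * platform data and the XSL interrupt along the way.
 */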
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;

        mutex_lock(&links_list_lock);
        kref_put(&link->ref, release_xsl);
        mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);

static void invalidate_range(struct mmu_notifier *mn,
                             struct mm_struct *mm,
                             unsigned long start, unsigned long end)
{
        struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier);
        struct ocxl_link *link = pe_data->link;
        unsigned long addr, pid, page_size = PAGE_SIZE;

        pid = mm->context.id;
        trace_ocxl_mmu_notifier_range(start, end, pid);

        spin_lock(&link->atsd_lock);
        for (addr = start; addr < end; addr += page_size)
                pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size);
        spin_unlock(&link->atsd_lock);
}

static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = {
        .invalidate_range = invalidate_range,
};

static u64 calculate_cfg_state(bool kernel)
{
        u64 state;

        state = SPA_CFG_DR;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                state |= SPA_CFG_TC;
        if (radix_enabled())
                state |= SPA_CFG_XLAT_ror;
        else
                state |= SPA_CFG_XLAT_hpt;
        state |= SPA_CFG_HV;
        if (kernel) {
                if (mfmsr() & MSR_SF)
                        state |= SPA_CFG_SF;
        } else {
                state |= SPA_CFG_PR;
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        state |= SPA_CFG_SF;
        }
        return state;
}

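/*
 * Add a Process Element for the given PASID to the link's Shared Process
 * Area, so that the XSL can translate addresses on behalf of the AFU.
 * @mm may be NULL for a kernel context. The optional xsl_err_cb callback
 * is invoked from the fault handler when a translation error cannot be
 * resolved for this PE.
 */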
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
                u64 amr, u16 bdf, struct mm_struct *mm,
                void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
                void *xsl_err_data)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        int pe_handle, rc = 0;
        struct pe_data *pe_data;

        BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
        if (pasid > SPA_PASID_MAX)
                return -EINVAL;

        mutex_lock(&spa->spa_lock);
        pe_handle = pasid & SPA_PE_MASK;
        pe = spa->spa_mem + pe_handle;

        if (pe->software_state) {
                rc = -EBUSY;
                goto unlock;
        }

        pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
        if (!pe_data) {
                rc = -ENOMEM;
                goto unlock;
        }

        pe_data->mm = mm;
        pe_data->xsl_err_cb = xsl_err_cb;
        pe_data->xsl_err_data = xsl_err_data;
        pe_data->link = link;
        pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops;

        memset(pe, 0, sizeof(struct ocxl_process_element));
        pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
        pe->pasid = cpu_to_be32(pasid << (31 - 19));
        pe->bdf = cpu_to_be16(bdf);
        pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        pe->pid = cpu_to_be32(pidr);
        pe->tid = cpu_to_be32(tidr);
        pe->amr = cpu_to_be64(amr);
        pe->software_state = cpu_to_be32(SPA_PE_VALID);

        /*
         * For user contexts, register a copro so that TLBIs are seen
         * by the nest MMU. If we have a kernel context, TLBIs are
         * already global.
         */
        if (mm) {
                mm_context_add_copro(mm);
                if (link->arva) {
                        /* Use the MMIO registers for TLB invalidate
                         * operations.
                         */
                        trace_ocxl_init_mmu_notifier(pasid, mm->context.id);
                        mmu_notifier_register(&pe_data->mmu_notifier, mm);
                }
        }

        /*
         * The barrier makes sure the PE is visible in the SPA before
         * it is used by the device. It also helps with the global
         * TLBI invalidation.
         */
        mb();
        radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

        /*
         * The mm must stay valid for as long as the device uses it.
         * We lower the count when the context is removed from the
         * SPA.
         *
         * We grab mm_count (and not mm_users), as we don't want to
         * end up in a circular dependency if a process mmaps its
         * mmio, therefore incrementing the file ref count when
         * calling mmap(), and forgets to unmap before exiting. In
         * that scenario, when the kernel handles the death of the
         * process, the file is not cleaned because unmap was not
         * called, and the mm wouldn't be freed because we would still
         * have a reference on mm_users. Incrementing mm_count solves
         * the problem.
         */
        if (mm)
                mmgrab(mm);
        trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
        mutex_unlock(&spa->spa_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);

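/*
 * Update the thread ID of an existing Process Element, then ask the
 * platform code to flush the entry from the XSL/NPU cache so that the
 * new value is picked up on the next translation.
 */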
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        int pe_handle, rc;

        if (pasid > SPA_PASID_MAX)
                return -EINVAL;

        pe_handle = pasid & SPA_PE_MASK;
        pe = spa->spa_mem + pe_handle;

        mutex_lock(&spa->spa_lock);

        pe->tid = cpu_to_be32(tid);

        /*
         * The barrier makes sure the PE is updated before we clear
         * the NPU context cache below, so that the old PE cannot be
         * reloaded erroneously.
         */
        mb();

        /*
         * Hook to platform code: on powerpc, the entry needs to be
         * cleared from the context cache of the NPU.
         */
        rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
        WARN_ON(rc);

        mutex_unlock(&spa->spa_lock);
        return rc;
}

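/*
 * Tear down the Process Element for a PASID: clear it in the SPA, flush
 * the XSL/NPU cache, unregister the MMU notifier and drop the references
 * taken on the mm in ocxl_link_add_pe().
 */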
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        struct pe_data *pe_data;
        int pe_handle, rc;

        if (pasid > SPA_PASID_MAX)
                return -EINVAL;

        /*
         * About synchronization with our memory fault handler:
         *
         * Before removing the PE, the driver is supposed to have
         * notified the AFU, which should have cleaned up and made
         * sure the PASID is no longer in use, including pending
         * interrupts. However, there's no way to be sure...
         *
         * A stale fault for this PASID could still be sitting on the
         * interrupt queue. If it is handled while the PE is still
         * valid, the fault goes through the mm of the process, which
         * is protected with mmget_not_zero(). If it is handled after
         * the PE has been removed, the handler won't find the pe_data
         * in the radix tree and will bail out early with only a debug
         * message.
         */
        pe_handle = pasid & SPA_PE_MASK;
        pe = spa->spa_mem + pe_handle;

        mutex_lock(&spa->spa_lock);

        if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
                rc = -EINVAL;
                goto unlock;
        }

        trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
                                be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

        memset(pe, 0, sizeof(struct ocxl_process_element));

        /*
         * The barrier makes sure the PE is removed from the SPA
         * before we clear the NPU context cache below, so that the
         * old PE cannot be reloaded erroneously.
         */
        mb();

        /*
         * Hook to platform code: on powerpc, the entry needs to be
         * cleared from the context cache of the NPU.
         */
        rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
        WARN_ON(rc);

        pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
        if (!pe_data) {
                WARN(1, "Couldn't find pe data when removing PE\n");
        } else {
                if (pe_data->mm) {
                        if (link->arva) {
                                trace_ocxl_release_mmu_notifier(pasid,
                                                pe_data->mm->context.id);
                                mmu_notifier_unregister(&pe_data->mmu_notifier,
                                                pe_data->mm);
                                spin_lock(&link->atsd_lock);
                                pnv_ocxl_tlb_invalidate(link->arva,
                                                pe_data->mm->context.id,
                                                0ull,
                                                PAGE_SIZE);
                                spin_unlock(&link->atsd_lock);
                        }
                        mm_context_remove_copro(pe_data->mm);
                        mmdrop(pe_data->mm);
                }
                kfree_rcu(pe_data, rcu);
        }
unlock:
        mutex_unlock(&spa->spa_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);

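/*
 * Allocate a hardware interrupt from the XIVE controller for an AFU,
 * within the per-link budget (irq_available). The hardware IRQ number
 * is returned through *hw_irq.
 */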
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;
        int irq;

        if (atomic_dec_if_positive(&link->irq_available) < 0)
                return -ENOSPC;

        irq = xive_native_alloc_irq();
        if (!irq) {
                atomic_inc(&link->irq_available);
                return -ENXIO;
        }

        *hw_irq = irq;
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);

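/*
 * Return an interrupt allocated with ocxl_link_irq_alloc() to the XIVE
 * controller and give the credit back to the link's budget.
 */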
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
        struct ocxl_link *link = (struct ocxl_link *) link_handle;

        xive_native_free_irq(hw_irq);
        atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);