// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include "ocxl_internal.h"
#include "trace.h"

struct afu_irq {
    int id;                 /* per-context id, allocated from the idr */
    int hw_irq;             /* hardware irq number allocated on the link */
    unsigned int virq;      /* Linux virtual irq number */
    char *name;             /* name passed to request_irq() */
    irqreturn_t (*handler)(void *private);  /* optional driver callback */
    void (*free_private)(void *private);    /* releases private data on free */
    void *private;          /* opaque pointer passed to handler */
};

int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
    return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}

u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id)
{
    return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT);
}

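/*
 * Each AFU interrupt owns one page-sized slot in the context's mmap area,
 * starting at afu->irq_base_offset, so the two helpers above are simple
 * shifts by PAGE_SHIFT. For example, assuming 64K pages (PAGE_SHIFT = 16)
 * and an illustrative irq_base_offset of 0x100000, irq_id 3 corresponds to
 * offset 0x100000 + (3 << 16) = 0x130000, and that offset converts back to
 * id 3. The values here are examples only, not taken from real hardware.
 */
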
int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
        irqreturn_t (*handler)(void *private),
        void (*free_private)(void *private),
        void *private)
{
    struct afu_irq *irq;
    int rc;

    mutex_lock(&ctx->irq_lock);
    irq = idr_find(&ctx->irq_idr, irq_id);
    if (!irq) {
        rc = -EINVAL;
        goto unlock;
    }

    irq->handler = handler;
    irq->private = private;
    irq->free_private = free_private;

    rc = 0;
    // Fall through to unlock

unlock:
    mutex_unlock(&ctx->irq_lock);
    return rc;
}
EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);

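/*
 * Usage sketch, not part of this file: a driver that holds an ocxl_context
 * can allocate an AFU interrupt and attach a handler as below. The names
 * my_afu_irq_handler, struct my_dev and dev are hypothetical; passing NULL
 * as free_private simply means no cleanup callback is run when the
 * interrupt is freed.
 *
 *	static irqreturn_t my_afu_irq_handler(void *private)
 *	{
 *		struct my_dev *dev = private;
 *
 *		// process the AFU event signalled through this interrupt
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq_id, rc;
 *
 *	rc = ocxl_afu_irq_alloc(ctx, &irq_id);
 *	if (!rc)
 *		rc = ocxl_irq_set_handler(ctx, irq_id, my_afu_irq_handler,
 *					  NULL, dev);
 */
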
static irqreturn_t afu_irq_handler(int virq, void *data)
{
    struct afu_irq *irq = (struct afu_irq *) data;

    trace_ocxl_afu_irq_receive(virq);

    if (irq->handler)
        return irq->handler(irq->private);

    return IRQ_HANDLED; // Just drop it on the ground
}

static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
{
    int rc;

    irq->virq = irq_create_mapping(NULL, irq->hw_irq);
    if (!irq->virq) {
        pr_err("irq_create_mapping failed\n");
        return -ENOMEM;
    }
    pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);

    irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
    if (!irq->name) {
        irq_dispose_mapping(irq->virq);
        return -ENOMEM;
    }

    rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
    if (rc) {
        kfree(irq->name);
        irq->name = NULL;
        irq_dispose_mapping(irq->virq);
        pr_err("request_irq failed: %d\n", rc);
        return rc;
    }
    return 0;
}

static void release_afu_irq(struct afu_irq *irq)
{
    free_irq(irq->virq, irq);
    irq_dispose_mapping(irq->virq);
    kfree(irq->name);
}

int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id)
{
    struct afu_irq *irq;
    int rc;

    irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
    if (!irq)
        return -ENOMEM;

    /*
     * We limit the number of afu irqs per context and per link to
     * avoid a single process or user depleting the pool of IPIs
     */

    mutex_lock(&ctx->irq_lock);

    irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
            GFP_KERNEL);
    if (irq->id < 0) {
        rc = -ENOSPC;
        goto err_unlock;
    }

    rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq);
    if (rc)
        goto err_idr;

    rc = setup_afu_irq(ctx, irq);
    if (rc)
        goto err_alloc;

    trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq);
    mutex_unlock(&ctx->irq_lock);

    *irq_id = irq->id;

    return 0;

err_alloc:
    ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
err_idr:
    idr_remove(&ctx->irq_idr, irq->id);
err_unlock:
    mutex_unlock(&ctx->irq_lock);
    kfree(irq);
    return rc;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc);

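/*
 * The error labels above unwind in the reverse order of allocation: a
 * failure in setup_afu_irq() first releases the link interrupt, a failure
 * in ocxl_link_irq_alloc() removes the idr entry, and both paths then drop
 * the lock and free the afu_irq before returning the error to the caller.
 * setup_afu_irq() cleans up its own partial state (mapping, name) on
 * failure, so nothing more is needed for it here.
 */
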
/* Must be called with ctx->irq_lock held */
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
    trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
    if (ctx->mapping)
        unmap_mapping_range(ctx->mapping,
                ocxl_irq_id_to_offset(ctx, irq->id),
                1 << PAGE_SHIFT, 1);
    release_afu_irq(irq);
    if (irq->free_private)
        irq->free_private(irq->private);
    ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
    kfree(irq);
}

int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id)
{
    struct afu_irq *irq;

    mutex_lock(&ctx->irq_lock);

    irq = idr_find(&ctx->irq_idr, irq_id);
    if (!irq) {
        mutex_unlock(&ctx->irq_lock);
        return -EINVAL;
    }
    idr_remove(&ctx->irq_idr, irq->id);
    afu_irq_free(irq, ctx);
    mutex_unlock(&ctx->irq_lock);
    return 0;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_free);

void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
    struct afu_irq *irq;
    int id;

    mutex_lock(&ctx->irq_lock);
    idr_for_each_entry(&ctx->irq_idr, irq, id)
        afu_irq_free(irq, ctx);
    mutex_unlock(&ctx->irq_lock);
}

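/*
 * Unlike ocxl_afu_irq_free(), the loop above does not remove the entries
 * from the idr; it releases every interrupt still registered on the
 * context, and the caller is expected to dispose of the whole idr
 * afterwards, typically when the context itself is being freed.
 */
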
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id)
{
    struct xive_irq_data *xd;
    struct afu_irq *irq;
    u64 addr = 0;

    mutex_lock(&ctx->irq_lock);
    irq = idr_find(&ctx->irq_idr, irq_id);
    if (irq) {
        xd = irq_get_handler_data(irq->virq);
        addr = xd ? xd->trig_page : 0;
    }
    mutex_unlock(&ctx->irq_lock);
    return addr;
}
EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr);
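
/*
 * Usage sketch, not part of this file: the XIVE trigger page address
 * returned by ocxl_afu_irq_get_addr() is what the AFU needs in order to
 * raise the interrupt, so a driver would typically hand it to the device,
 * for instance through an AFU-specific MMIO register. How that register is
 * programmed depends entirely on the AFU; the register name below is
 * hypothetical and 0 means the interrupt id was not found.
 *
 *	u64 trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
 *
 *	if (trigger_addr) {
 *		// write trigger_addr into the AFU's (hypothetical)
 *		// MY_AFU_IRQ_TRIGGER_REG so the device can raise the
 *		// interrupt by writing to that address
 *	}
 */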