// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

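/**
 * ocxl_context_alloc() - Allocate an OpenCAPI context
 * @context: set to the newly allocated context on success
 * @afu: the AFU the context belongs to
 * @mapping: the mapping to unmap when the context is closed (may be NULL)
 *
 * Allocates a PASID for the context from the AFU's range and takes a
 * reference on the AFU for the lifetime of the context.
 *
 * Returns 0 on success, a negative errno on failure.
 */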
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
		struct address_space *mapping)
{
	int pasid;
	struct ocxl_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->afu = afu;
	mutex_lock(&afu->contexts_lock);
	pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
			afu->pasid_base + afu->pasid_max, GFP_KERNEL);
	if (pasid < 0) {
		mutex_unlock(&afu->contexts_lock);
		kfree(ctx);
		return pasid;
	}
	afu->pasid_count++;
	mutex_unlock(&afu->contexts_lock);

	ctx->pasid = pasid;
	ctx->status = OPENED;
	mutex_init(&ctx->status_mutex);
	ctx->mapping = mapping;
	mutex_init(&ctx->mapping_lock);
	init_waitqueue_head(&ctx->events_wq);
	mutex_init(&ctx->xsl_error_lock);
	mutex_init(&ctx->irq_lock);
	idr_init(&ctx->irq_idr);
	ctx->tidr = 0;

	/*
	 * Keep a reference on the AFU to make sure it's valid for the
	 * duration of the life of the context
	 */
	ocxl_afu_get(afu);
	*context = ctx;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);

/*
 * Callback for when a translation fault occurs on the device.
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the fault
 * dsisr: the value of the dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
	struct ocxl_context *ctx = (struct ocxl_context *) data;

	mutex_lock(&ctx->xsl_error_lock);
	ctx->xsl_error.addr = addr;
	ctx->xsl_error.dsisr = dsisr;
	ctx->xsl_error.count++;
	mutex_unlock(&ctx->xsl_error_lock);

	wake_up_all(&ctx->events_wq);
}

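/**
 * ocxl_context_attach() - Attach an MM context to an OpenCAPI context
 * @ctx: the OpenCAPI context to attach
 * @amr: the value of the AMR register to restrict access, may be 0
 * @mm: the mm to attach to the context, may be NULL
 *
 * Adds a Process Element for the context on the link, so the device
 * can issue translated memory accesses on behalf of the process.
 *
 * Returns 0 on success, a negative errno on failure.
 */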
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
	int rc;
	unsigned long pidr = 0;
	struct pci_dev *dev;

	// Locks both status & tidr
	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (mm)
		pidr = mm->context.id;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
			amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
	if (rc)
		goto out;

	ctx->status = ATTACHED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);

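/*
 * Map one page of an AFU interrupt trigger into userspace. The offset
 * within the irq range selects the interrupt; fails if that irq was
 * never allocated.
 */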
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 trigger_addr;
	int irq_id = ocxl_irq_offset_to_id(ctx, offset);

	trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	if (!trigger_addr)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

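/*
 * Map a page of the per-process MMIO area for the context's PASID.
 * Only valid while the context is attached, so the status is checked
 * under the status mutex.
 */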
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
		u64 offset, struct ocxl_context *ctx)
{
	u64 pp_mmio_addr;
	int pasid_off;
	vm_fault_t ret;

	if (offset >= ctx->afu->config.pp_mmio_stride)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != ATTACHED) {
		mutex_unlock(&ctx->status_mutex);
		pr_debug("%s: Context not attached, failing mmio mmap\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	pasid_off = ctx->pasid - ctx->afu->pasid_base;
	pp_mmio_addr = ctx->afu->pp_mmio_start +
		pasid_off * ctx->afu->config.pp_mmio_stride +
		offset;

	ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
	mutex_unlock(&ctx->status_mutex);
	return ret;
}

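/*
 * Fault handler for the context's mmap: offsets below irq_base_offset
 * fall in the per-process MMIO area, anything above maps an AFU
 * interrupt trigger page.
 */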
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxl_context *ctx = vma->vm_file->private_data;
	u64 offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
		ctx->pasid, vmf->address, offset);

	if (offset < ctx->afu->irq_base_offset)
		ret = map_pp_mmio(vma, vmf->address, offset, ctx);
	else
		ret = map_afu_irq(vma, vmf->address, offset, ctx);
	return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
	.fault = ocxl_mmap_fault,
};

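/* Sanity checks for the mmap of an AFU interrupt trigger page */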
static int check_mmap_afu_irq(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

	/* Can't mmap more than one trigger page at a time */
	if (vma_pages(vma) != 1)
		return -EINVAL;

	/* Fail if the irq was never allocated */
	if (!ocxl_afu_irq_get_addr(ctx, irq_id))
		return -EINVAL;

	/*
	 * The trigger page should only be accessible in write mode.
	 *
	 * It's a bit theoretical, as a page mmapped with only
	 * PROT_WRITE is currently readable, but it doesn't hurt.
	 */
	if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
		!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
	return 0;
}

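/* Reject MMIO mmaps extending beyond the per-process MMIO area */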
static int check_mmap_mmio(struct ocxl_context *ctx,
			struct vm_area_struct *vma)
{
	if ((vma_pages(vma) + vma->vm_pgoff) >
		(ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
		return -EINVAL;
	return 0;
}

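/*
 * mmap handler for the context: validates the requested range, then
 * installs ocxl_vmops so pages are inserted on fault.
 */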
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
	int rc;

	if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
		rc = check_mmap_mmio(ctx, vma);
	else
		rc = check_mmap_afu_irq(ctx, vma);
	if (rc)
		return rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxl_vmops;
	return 0;
}

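/**
 * ocxl_context_detach() - Detach an MM context from an OpenCAPI context
 * @ctx: the OpenCAPI context to detach
 *
 * Terminates the PASID on the AFU and removes the Process Element from
 * the link. Safe to call on a context that was never attached.
 *
 * Returns 0 on success, -EBUSY if the AFU timed out terminating the
 * PASID, in which case the caller must not free the context.
 */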
int ocxl_context_detach(struct ocxl_context *ctx)
{
	struct pci_dev *dev;
	int afu_control_pos;
	enum ocxl_context_status status;
	int rc;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != ATTACHED)
		return 0;

	dev = to_pci_dev(ctx->afu->fn->dev.parent);
	afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

	mutex_lock(&ctx->afu->afu_control_lock);
	rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
	mutex_unlock(&ctx->afu->afu_control_lock);
	trace_ocxl_terminate_pasid(ctx->pasid, rc);
	if (rc) {
		/*
		 * If we timeout waiting for the AFU to terminate the
		 * pasid, then it's dangerous to clean up the Process
		 * Element entry in the SPA, as it may be referenced
		 * in the future by the AFU. In which case, we would
		 * checkstop because of an invalid PE access. So leave
		 * the PE defined. Caller shouldn't free the context
		 * so that the PE entry stays allocated.
		 *
		 * A link reset will be required to free the PE entry.
		 */
		if (rc == -EBUSY)
			return rc;
	}
	rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
	if (rc) {
		dev_warn(&dev->dev,
			"Couldn't remove PE entry cleanly: %d\n", rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

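/*
 * Detach every context on the AFU and tear down any active mmaps, so
 * userspace can no longer touch the card.
 */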
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
	struct ocxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		ocxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active mmio
		 * mappings so userspace cannot interfere with the
		 * card if it comes back. Easiest way to exercise this
		 * is to unbind and rebind the driver via sysfs while
		 * it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

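/**
 * ocxl_context_free() - Free an OpenCAPI context
 * @ctx: the OpenCAPI context to free
 *
 * Releases the context's PASID and AFU interrupts, and drops the
 * reference on the AFU taken at allocation.
 */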
void ocxl_context_free(struct ocxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	ctx->afu->pasid_count--;
	idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
	mutex_unlock(&ctx->afu->contexts_lock);

	ocxl_afu_irq_free_all(ctx);
	idr_destroy(&ctx->irq_idr);

	ocxl_afu_put(ctx->afu);
	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);