// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
    return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
    int i;

    ctx->afu = afu;
    ctx->master = master;
    ctx->pid = NULL; /* Set in start work ioctl */
    mutex_init(&ctx->mapping_lock);
    ctx->mapping = NULL;
    ctx->tidr = 0;
    ctx->assign_tidr = false;

    if (cxl_is_power8()) {
        spin_lock_init(&ctx->sste_lock);

        /*
         * Allocate the segment table before the context is put in the IDR so
         * that it can always be accessed once it is reachable via the IDR.
         * For the same reason, the segment table is only destroyed after the
         * context is removed from the IDR. Access to this in the IOCTL is
         * protected by Linux filesystem semantics (can't IOCTL until open is
         * complete).
         */
        i = cxl_alloc_sst(ctx);
        if (i)
            return i;
    }

    INIT_WORK(&ctx->fault_work, cxl_handle_fault);

    init_waitqueue_head(&ctx->wq);
    spin_lock_init(&ctx->lock);

    ctx->irq_bitmap = NULL;
    ctx->pending_irq = false;
    ctx->pending_fault = false;
    ctx->pending_afu_err = false;

    INIT_LIST_HEAD(&ctx->irq_names);

    /*
     * When we have to destroy all contexts in cxl_context_detach_all() we
     * end up with afu_release_irqs() called from inside an
     * idr_for_each_entry(). Hence we need to make sure that anything
     * dereferenced from this IDR is valid before we allocate the IDR entry
     * here. Clearing out the IRQ ranges ensures that.
     */
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        ctx->irqs.range[i] = 0;

    mutex_init(&ctx->status_mutex);

    ctx->status = OPENED;

    /*
     * Allocating the IDR entry: make sure everything that is dereferenced
     * from it has been set up first.
     */
    mutex_lock(&afu->contexts_lock);
    idr_preload(GFP_KERNEL);
    i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
              ctx->afu->num_procs, GFP_NOWAIT);
    idr_preload_end();
    mutex_unlock(&afu->contexts_lock);
    if (i < 0)
        return i;

    ctx->pe = i;
    if (cpu_has_feature(CPU_FTR_HVMODE)) {
        ctx->elem = &ctx->afu->native->spa[i];
        ctx->external_pe = ctx->pe;
    } else {
        ctx->external_pe = -1; /* assigned when attaching */
    }
    ctx->pe_inserted = false;

    /*
     * Take a ref on the afu so that it stays alive at least until
     * this context is reclaimed inside reclaim_ctx().
     */
    cxl_afu_get(afu);
    return 0;
}

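/*
 * Record the address_space backing this context's file so that any problem
 * state mappings can be invalidated later (see cxl_context_detach_all()).
 */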
void cxl_context_set_mapping(struct cxl_context *ctx,
            struct address_space *mapping)
{
    mutex_lock(&ctx->mapping_lock);
    ctx->mapping = mapping;
    mutex_unlock(&ctx->mapping_lock);
}

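/*
 * Fault handler for mmaps of the problem state area: resolve the faulting
 * offset against either the AFU-wide (dedicated mode) or per-context problem
 * state space and insert the corresponding pfn.
 */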
static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct cxl_context *ctx = vma->vm_file->private_data;
    u64 area, offset;
    vm_fault_t ret;

    offset = vmf->pgoff << PAGE_SHIFT;

    pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
            __func__, ctx->pe, vmf->address, offset);

    if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
        area = ctx->afu->psn_phys;
        if (offset >= ctx->afu->adapter->ps_size)
            return VM_FAULT_SIGBUS;
    } else {
        area = ctx->psn_phys;
        if (offset >= ctx->psn_size)
            return VM_FAULT_SIGBUS;
    }

    mutex_lock(&ctx->status_mutex);

    if (ctx->status != STARTED) {
        mutex_unlock(&ctx->status_mutex);
        pr_devel("%s: Context not started, failing problem state access\n", __func__);
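        /*
         * If requested, back the mapping with a kernel page filled with
         * 0xff instead of faulting, so reads of the problem state area
         * return all-ones once the context is no longer running.
         */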
        if (ctx->mmio_err_ff) {
            if (!ctx->ff_page) {
                ctx->ff_page = alloc_page(GFP_USER);
                if (!ctx->ff_page)
                    return VM_FAULT_OOM;
                memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
            }
            get_page(ctx->ff_page);
            vmf->page = ctx->ff_page;
            vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
            return 0;
        }
        return VM_FAULT_SIGBUS;
    }

    ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

    mutex_unlock(&ctx->status_mutex);

    return ret;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
    .fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
    u64 start = vma->vm_pgoff << PAGE_SHIFT;
    u64 len = vma->vm_end - vma->vm_start;

    if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
        if (start + len > ctx->afu->adapter->ps_size)
            return -EINVAL;

        if (cxl_is_power9()) {
            /*
             * Make sure there is a valid problem state
             * area space for this AFU.
             */
            if (ctx->master && !ctx->afu->psa) {
                pr_devel("AFU doesn't support mmio space\n");
                return -EINVAL;
            }

            /* Can't mmap until the AFU is enabled */
            if (!ctx->afu->enabled)
                return -EBUSY;
        }
    } else {
        if (start + len > ctx->psn_size)
            return -EINVAL;

        /* Make sure there is a valid per process space for this AFU */
        if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
            pr_devel("AFU doesn't support mmio space\n");
            return -EINVAL;
        }

        /* Can't mmap until the AFU is enabled */
        if (!ctx->afu->enabled)
            return -EBUSY;
    }

    pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
         ctx->psn_phys, ctx->pe, ctx->master);

    vma->vm_flags |= VM_IO | VM_PFNMAP;
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_ops = &cxl_mmap_vmops;
    return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
    enum cxl_context_status status;

    mutex_lock(&ctx->status_mutex);
    status = ctx->status;
    ctx->status = CLOSED;
    mutex_unlock(&ctx->status_mutex);
    if (status != STARTED)
        return -EBUSY;

    /*
     * Only warn if we detached while the link was OK.
     * If detach fails when hw is down, we don't care.
     */
    WARN_ON(cxl_ops->detach_process(ctx) &&
        cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
    flush_work(&ctx->fault_work); /* Only needed for dedicated process */

    /*
     * Wait until no further interrupts are presented by the PSL
     * for this context.
     */
    if (cxl_ops->irq_wait)
        cxl_ops->irq_wait(ctx);

    /* release the reference to the group leader and mm handling pid */
    put_pid(ctx->pid);

    cxl_ctx_put();

    /* Decrease the attached context count on the adapter */
    cxl_adapter_context_put(ctx->afu->adapter);

    /* Decrease the mm count on the context */
    cxl_context_mm_count_put(ctx);
    if (ctx->mm)
        mm_context_remove_copro(ctx->mm);
    ctx->mm = NULL;

    return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually free the
 * context, but it should stop the context running in hardware (i.e. prevent
 * this context from generating any further interrupts so that it can be
 * freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
    int rc;

    rc = __detach_context(ctx);
    if (rc)
        return;

    afu_release_irqs(ctx, ctx);
    wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
    struct cxl_context *ctx;
    int tmp;

    mutex_lock(&afu->contexts_lock);
    idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
        /*
         * Anything done in here needs to be set up before the IDR is
         * created and torn down after the IDR is removed.
         */
        cxl_context_detach(ctx);

        /*
         * We are force detaching - remove any active PSA mappings so
         * userspace cannot interfere with the card if it comes back.
         * Easiest way to exercise this is to unbind and rebind the
         * driver via sysfs while it is in use.
         */
        mutex_lock(&ctx->mapping_lock);
        if (ctx->mapping)
            unmap_mapping_range(ctx->mapping, 0, 0, 1);
        mutex_unlock(&ctx->mapping_lock);
    }
    mutex_unlock(&afu->contexts_lock);
}

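/*
 * RCU callback that frees the context's resources once the grace period
 * following its removal from the IDR has elapsed, so no IDR reader can
 * still be dereferencing it.
 */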
static void reclaim_ctx(struct rcu_head *rcu)
{
    struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

    if (cxl_is_power8())
        free_page((u64)ctx->sstp);
    if (ctx->ff_page)
        __free_page(ctx->ff_page);
    ctx->sstp = NULL;

    bitmap_free(ctx->irq_bitmap);

    /* Drop ref to the afu device taken during cxl_context_init */
    cxl_afu_put(ctx->afu);

    kfree(ctx);
}

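/*
 * Remove the context from the AFU's IDR and defer freeing it to reclaim_ctx()
 * via RCU.
 */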
void cxl_context_free(struct cxl_context *ctx)
{
    if (ctx->kernelapi && ctx->mapping)
        cxl_release_mapping(ctx);
    mutex_lock(&ctx->afu->contexts_lock);
    idr_remove(&ctx->afu->contexts_idr, ctx->pe);
    mutex_unlock(&ctx->afu->contexts_lock);
    call_rcu(&ctx->rcu, reclaim_ctx);
}

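/* Take a reference on the context's mm_struct (if any) so it isn't freed */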
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
    if (ctx->mm)
        mmgrab(ctx->mm);
}

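/* Drop the mm_struct reference taken in cxl_context_mm_count_get() */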
void cxl_context_mm_count_put(struct cxl_context *ctx)
{
    if (ctx->mm)
        mmdrop(ctx->mm);
}