// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * VAS user space API for its accelerators (Only NX-GZIP is supported now)
 * Copyright (C) 2019 Haren Myneni, IBM Corp
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <asm/vas.h>
#include <uapi/asm/vas-api.h>

/*
 * The driver creates the device node that can be used as follows:
 * For NX-GZIP
 *
 *	fd = open("/dev/crypto/nx-gzip", O_RDWR);
 *	rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
 *	paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
 *	vas_copy(&crb, 0, 1);
 *	vas_paste(paste_addr, 0, 1);
 *	close(fd) or exit process to close window.
 *
 * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
 * Copy/paste completion is reported to user space directly, so refer
 * to the NX hardware documentation for exact copy/paste usage and
 * completion / error conditions.
 */
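
/*
 * A slightly fuller sketch of the open path, assuming the
 * vas_tx_win_open_attr layout from <uapi/asm/vas-api.h> (version,
 * vas_id, flags) and using vas_id = -1 to ask for the default VAS
 * instance; treat it as an illustration rather than a reference:
 *
 *	struct vas_tx_win_open_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.version = 1;
 *	attr.vas_id = -1;
 *
 *	fd = open("/dev/crypto/nx-gzip", O_RDWR);
 *	rc = ioctl(fd, VAS_TX_WIN_OPEN, (unsigned long)&attr);
 *	paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0ULL);
 */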

/*
 * Wrapper object for the nx-gzip device - there is just one instance of
 * this node for the whole system.
 */
static struct coproc_dev {
	struct cdev cdev;
	struct device *device;
	char *name;
	dev_t devt;
	struct class *class;
	enum vas_cop_type cop_type;
	const struct vas_user_win_ops *vops;
} coproc_device;

struct coproc_instance {
	struct coproc_dev *coproc;
	struct vas_window *txwin;
};

static char *coproc_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
}

/*
 * Take references to the pid and mm
 */
int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
{
	/*
	 * A window opened by a child thread may not be closed when
	 * that thread exits. So take a reference to its pid and
	 * release it when the window is freed by the parent thread.
	 * Acquiring the reference to the task's pid makes sure the
	 * pid will not be re-used - needed only for multithreaded
	 * applications.
	 */
	task_ref->pid = get_task_pid(current, PIDTYPE_PID);
	/*
	 * Acquire a reference to the task's mm.
	 */
	task_ref->mm = get_task_mm(current);
	if (!task_ref->mm) {
		put_pid(task_ref->pid);
		pr_err("VAS: pid(%d): mm_struct is not found\n",
				current->pid);
		return -EPERM;
	}

	mmgrab(task_ref->mm);
	mmput(task_ref->mm);
	/*
	 * The process closes its windows during exit. But in a
	 * multithreaded application, a child thread can open a
	 * window and exit without closing it. So take a tgid
	 * reference until the window is closed to make sure the
	 * tgid is not reused.
	 */
	task_ref->tgid = find_get_pid(task_tgid_vnr(current));

	return 0;
}

/*
 * On successful return, the caller must release the task reference
 * with put_task_struct().
 */
static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
			  struct task_struct **tskp, struct pid **pidp)
{
	struct task_struct *tsk;
	struct pid *pid;

	pid = task_ref->pid;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk) {
		pid = task_ref->tgid;
		tsk = get_pid_task(pid, PIDTYPE_PID);
		/*
		 * The parent thread (tgid) will be closing the window
		 * when it exits. So we should not get here.
		 */
		if (WARN_ON_ONCE(!tsk))
			return false;
	}

	/* Return if the task is exiting. */
	if (tsk->flags & PF_EXITING) {
		put_task_struct(tsk);
		return false;
	}

	*tskp = tsk;
	*pidp = pid;

	return true;
}

/*
 * Update the CSB to indicate a translation error.
 *
 * User space will be polling on the CSB after the request is issued.
 * If NX can handle the request without any issues, it updates the CSB.
 * Whereas if NX encounters a page fault, the kernel handles the fault
 * and updates the CSB with a translation error.
 *
 * If we are unable to update the CSB (meaning copy_to_user() failed
 * due to an invalid csb_addr), send a signal to the process.
 */
void vas_update_csb(struct coprocessor_request_block *crb,
		    struct vas_user_win_ref *task_ref)
{
	struct coprocessor_status_block csb;
	struct kernel_siginfo info;
	struct task_struct *tsk;
	void __user *csb_addr;
	struct pid *pid;
	int rc;

	/*
	 * NX user space windows cannot be opened for task->mm=NULL
	 * and faults will not be generated for kernel requests.
	 */
	if (WARN_ON_ONCE(!task_ref->mm))
		return;

	csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);

	memset(&csb, 0, sizeof(csb));
	csb.cc = CSB_CC_FAULT_ADDRESS;
	csb.ce = CSB_CE_TERMINATION;
	csb.cs = 0;
	csb.count = 0;

	/*
	 * NX operates on and returns the CRB in BE format, as defined
	 * by the CRB struct. So save fault_storage_addr in BE, as NX
	 * pastes it into the FIFO, and expect user space to convert it
	 * to CPU format.
	 */
	csb.address = crb->stamp.nx.fault_storage_addr;
	csb.flags = 0;

	/*
	 * A process closes its send window after all pending NX
	 * requests are completed. In multi-threaded applications, a
	 * child thread can open a window and exit without closing it.
	 * Some requests may still be pending, or the window may be
	 * used by other threads later. We should handle faults if NX
	 * encounters page faults on these requests: update the CSB
	 * with a translation error and the fault address. If the
	 * csb_addr passed by user space is invalid, send a SEGV
	 * signal to the pid saved in the window. If that child thread
	 * is not running, send the signal to the tgid.
	 * The parent thread (tgid) will close this window upon its exit.
	 *
	 * pid and mm references are taken when the window is opened by
	 * a process (pid). So the tgid is used only when a child thread
	 * opens a window and exits without closing it.
	 */

	if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
		return;

	kthread_use_mm(task_ref->mm);
	rc = copy_to_user(csb_addr, &csb, sizeof(csb));
	/*
	 * User space polls on csb.flags (the first byte). So add a
	 * barrier, then copy the first byte with the csb.flags update.
	 */
	if (!rc) {
		csb.flags = CSB_V;
		/* Make sure update to csb.flags is visible now */
		smp_mb();
		rc = copy_to_user(csb_addr, &csb, sizeof(u8));
	}
	kthread_unuse_mm(task_ref->mm);
	put_task_struct(tsk);

	/* Success */
	if (!rc)
		return;

	pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
			csb_addr, pid_vnr(pid));

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = EFAULT;
	info.si_code = SEGV_MAPERR;
	info.si_addr = csb_addr;
	/*
	 * The process will be polling on csb.flags after the request
	 * is sent to NX. So generally the CSB update should not fail
	 * except when an application passes an invalid csb_addr. So an
	 * error message is displayed and it is left to user space
	 * whether to ignore or handle this signal.
	 */
	rcu_read_lock();
	rc = kill_pid_info(SIGSEGV, &info, pid);
	rcu_read_unlock();

	pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
			pid_vnr(pid), rc);
}
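
/*
 * A minimal sketch of the user-space side of this contract, assuming
 * the CSB layout from the NX documentation (a valid bit CSB_V in
 * csb.flags, the completion code in csb.cc and the faulting address,
 * in BE, in csb.address); the resubmit policy is only an illustration:
 *
 *	memset(csb, 0, sizeof(*csb));
 *	crb.csb_addr = cpu_to_be64((__u64)csb);
 *	vas_copy(&crb, 0, 1);
 *	vas_paste(paste_addr, 0, 1);
 *	while (!(*(volatile __u8 *)&csb->flags & CSB_V))
 *		;
 *	if (csb->cc == CSB_CC_FAULT_ADDRESS)
 *		touch the page at be64_to_cpu(csb->address) and
 *		resubmit the CRB;
 */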

void vas_dump_crb(struct coprocessor_request_block *crb)
{
	struct data_descriptor_entry *dde;
	struct nx_fault_stamp *nx;

	dde = &crb->source;
	pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
		dde->count, dde->index, dde->flags);

	dde = &crb->target;
	pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
		be64_to_cpu(dde->address), be32_to_cpu(dde->length),
		dde->count, dde->index, dde->flags);

	nx = &crb->stamp.nx;
	pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
		be32_to_cpu(nx->pswid),
		be64_to_cpu(crb->stamp.nx.fault_storage_addr),
		nx->flags, nx->fault_status);
}

static int coproc_open(struct inode *inode, struct file *fp)
{
	struct coproc_instance *cp_inst;

	cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
	if (!cp_inst)
		return -ENOMEM;

	cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
					cdev);
	fp->private_data = cp_inst;

	return 0;
}

static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
{
	void __user *uptr = (void __user *)arg;
	struct vas_tx_win_open_attr uattr;
	struct coproc_instance *cp_inst;
	struct vas_window *txwin;
	int rc;

	cp_inst = fp->private_data;

	/*
	 * One window per file descriptor
	 */
	if (cp_inst->txwin)
		return -EEXIST;

	rc = copy_from_user(&uattr, uptr, sizeof(uattr));
	if (rc) {
		pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
		return -EFAULT;
	}

	if (uattr.version != 1) {
		pr_err("Invalid window open API version\n");
		return -EINVAL;
	}

	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
		pr_err("VAS API is not registered\n");
		return -EACCES;
	}

	txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
						cp_inst->coproc->cop_type);
	if (IS_ERR(txwin)) {
		pr_err("%s() VAS window open failed, %ld\n", __func__,
				PTR_ERR(txwin));
		return PTR_ERR(txwin);
	}

	mutex_init(&txwin->task_ref.mmap_mutex);
	cp_inst->txwin = txwin;

	return 0;
}

static int coproc_release(struct inode *inode, struct file *fp)
{
	struct coproc_instance *cp_inst = fp->private_data;
	int rc;

	if (cp_inst->txwin) {
		if (cp_inst->coproc->vops &&
			cp_inst->coproc->vops->close_win) {
			rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
			if (rc)
				return rc;
		}
		cp_inst->txwin = NULL;
	}

	kfree(cp_inst);
	fp->private_data = NULL;

	/*
	 * We don't know here if the user has other receive windows
	 * open, so we can't really call clear_thread_tidr().
	 * So, once the process calls set_thread_tidr(), the
	 * TIDR value sticks around until the process exits, resulting
	 * in an extra copy in restore_sprs().
	 */

	return 0;
}

/*
 * If the executed instruction that caused the fault was a paste, then
 * clear regs CR0[EQ], advance NIP, and return 0. Else return an error
 * code.
 */
static int do_fail_paste(void)
{
	struct pt_regs *regs = current->thread.regs;
	u32 instword;

	if (WARN_ON_ONCE(!regs))
		return -EINVAL;

	if (WARN_ON_ONCE(!user_mode(regs)))
		return -EINVAL;

	/*
	 * If we couldn't translate the instruction, the driver should
	 * return success without handling the fault: the access will
	 * be retried or the instruction fetch will fault.
	 */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EAGAIN;

	/*
	 * Not a paste instruction, the driver may fail the fault.
	 */
	if ((instword & PPC_INST_PASTE_MASK) != PPC_INST_PASTE)
		return -ENOENT;

	regs->ccr &= ~0xe0000000;	/* Clear CR0[0-2] to fail paste */
	regs_add_return_ip(regs, 4);	/* Emulate the paste */

	return 0;
}

/*
 * This fault handler is invoked when the core generates a page fault
 * on the paste address. This happens if the kernel closes the window
 * in the hypervisor (on pseries) due to a lost credit, or if the paste
 * address is not mapped.
 */
static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *fp = vma->vm_file;
	struct coproc_instance *cp_inst = fp->private_data;
	struct vas_window *txwin;
	vm_fault_t fault;
	u64 paste_addr;
	int ret;

	/*
	 * The window is not open. We shouldn't expect this fault.
	 */
	if (!cp_inst || !cp_inst->txwin) {
		pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
				__func__);
		return VM_FAULT_SIGBUS;
	}

	txwin = cp_inst->txwin;
	/*
	 * When the LPAR loses credits due to core removal or during
	 * migration, invalidate the existing mapping for the current
	 * paste addresses and set the windows inactive (zap_page_range
	 * in reconfig_close_windows()).
	 * A new mapping will be done later after migration or when new
	 * credits are available. So continue to receive faults if user
	 * space issues NX requests.
	 */
	if (txwin->task_ref.vma != vmf->vma) {
		pr_err("%s(): No previous mapping with paste address\n",
			__func__);
		return VM_FAULT_SIGBUS;
	}

	mutex_lock(&txwin->task_ref.mmap_mutex);
	/*
	 * The window may be inactive due to a lost credit (Ex: core
	 * removal with DLPAR). If the window is active again when
	 * the credit is available, map the new paste address at the
	 * window virtual address.
	 */
	if (txwin->status == VAS_WIN_ACTIVE) {
		paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
		if (paste_addr) {
			fault = vmf_insert_pfn(vma, vma->vm_start,
					(paste_addr >> PAGE_SHIFT));
			mutex_unlock(&txwin->task_ref.mmap_mutex);
			return fault;
		}
	}
	mutex_unlock(&txwin->task_ref.mmap_mutex);

	/*
	 * Received this fault due to closing the actual window.
	 * It can happen during migration or with lost credits.
	 * Since there is no mapping, return the paste instruction
	 * failure to user space.
	 */
	ret = do_fail_paste();
	/*
	 * User space can retry several times until success (needed
	 * for migration), or should fall back to SW compression, or
	 * manage with the existing open windows if available.
	 * By looking at the sysfs interface, it can determine whether
	 * these failures are happening during migration or core
	 * removal: nr_used_credits > nr_total_credits when credits
	 * are lost.
	 */
	if (!ret || (ret == -EAGAIN))
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
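
/*
 * A rough sketch of the retry loop the comment above expects from
 * user space once pastes start failing (window closed during migration
 * or DLPAR core removal); vas_paste() is the copy-paste.h wrapper and
 * the retry/backoff policy shown here is only an illustration:
 *
 *	for (i = 0; i < MAX_RETRIES; i++) {
 *		if (vas_paste(paste_addr, 0, 1) reports CR0[EQ] set)
 *			break;
 *		usleep(backoff);
 *	}
 *	if (i == MAX_RETRIES)
 *		fall back to software compression or reuse another
 *		already-open window;
 */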

static const struct vm_operations_struct vas_vm_ops = {
	.fault = vas_mmap_fault,
};

static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct coproc_instance *cp_inst = fp->private_data;
	struct vas_window *txwin;
	unsigned long pfn;
	u64 paste_addr;
	pgprot_t prot;
	int rc;

	txwin = cp_inst->txwin;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
				(vma->vm_end - vma->vm_start), PAGE_SIZE);
		return -EINVAL;
	}

	/* Ensure instance has an open send window */
	if (!txwin) {
		pr_err("%s(): No send window open?\n", __func__);
		return -EINVAL;
	}

	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
		pr_err("%s(): VAS API is not registered\n", __func__);
		return -EACCES;
	}

	/*
	 * The initial mmap is done after the window is opened
	 * with the ioctl. But before mmap(), this window can be closed
	 * in the hypervisor due to a lost credit (core removal on
	 * pseries). So if the window is not active, fail mmap() with
	 * -EACCES and expect user space to reissue mmap() when the
	 * window is active again, or to open a new window when a
	 * credit is available.
	 * mmap_mutex protects the paste address mmap() against DLPAR
	 * close/open events and allows mmap() only when the window is
	 * active.
	 */
	mutex_lock(&txwin->task_ref.mmap_mutex);
	if (txwin->status != VAS_WIN_ACTIVE) {
		pr_err("%s(): Window is not active\n", __func__);
		rc = -EACCES;
		goto out;
	}

	paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
	if (!paste_addr) {
		pr_err("%s(): Window paste address failed\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	pfn = paste_addr >> PAGE_SHIFT;

	/* flags, page_prot from cxl_mmap(), except we want cacheable */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);

	prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);

	rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			vma->vm_end - vma->vm_start, prot);

	pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
			paste_addr, vma->vm_start, rc);

	txwin->task_ref.vma = vma;
	vma->vm_ops = &vas_vm_ops;

out:
	mutex_unlock(&txwin->task_ref.mmap_mutex);
	return rc;
}
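
/*
 * The corresponding user-space call, as a sketch: a single shared page
 * at offset 0, with -EACCES treated as "window not active yet" so the
 * mmap() can simply be reissued later (the retry policy is only an
 * illustration):
 *
 *	paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0ULL);
 *	if (paste_addr == MAP_FAILED && errno == EACCES)
 *		wait for credits / migration to finish and mmap() again;
 */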

static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case VAS_TX_WIN_OPEN:
		return coproc_ioc_tx_win_open(fp, arg);
	default:
		return -EINVAL;
	}
}

static struct file_operations coproc_fops = {
	.open = coproc_open,
	.release = coproc_release,
	.mmap = coproc_mmap,
	.unlocked_ioctl = coproc_ioctl,
};

/*
 * Only the nx-gzip coprocessor type is supported now, but this API
 * code can be extended to other coprocessor types later.
 */
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
			    const char *name,
			    const struct vas_user_win_ops *vops)
{
	int rc = -EINVAL;
	dev_t devno;

	rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
	if (rc) {
		pr_err("Unable to allocate coproc major number: %i\n", rc);
		return rc;
	}

	pr_devel("%s device allocated, dev [%i,%i]\n", name,
			MAJOR(coproc_device.devt), MINOR(coproc_device.devt));

	coproc_device.class = class_create(mod, name);
	if (IS_ERR(coproc_device.class)) {
		rc = PTR_ERR(coproc_device.class);
		pr_err("Unable to create %s class %d\n", name, rc);
		goto err_class;
	}
	coproc_device.class->devnode = coproc_devnode;
	coproc_device.cop_type = cop_type;
	coproc_device.vops = vops;

	coproc_fops.owner = mod;
	cdev_init(&coproc_device.cdev, &coproc_fops);

	devno = MKDEV(MAJOR(coproc_device.devt), 0);
	rc = cdev_add(&coproc_device.cdev, devno, 1);
	if (rc) {
		pr_err("cdev_add() failed %d\n", rc);
		goto err_cdev;
	}

	coproc_device.device = device_create(coproc_device.class, NULL,
			devno, NULL, name, MINOR(devno));
	if (IS_ERR(coproc_device.device)) {
		rc = PTR_ERR(coproc_device.device);
		pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
		goto err;
	}

	pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
			MINOR(devno));

	return 0;

err:
	cdev_del(&coproc_device.cdev);
err_cdev:
	class_destroy(coproc_device.class);
err_class:
	unregister_chrdev_region(coproc_device.devt, 1);
	return rc;
}
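
/*
 * Registration sketch for a platform backend (the names here follow
 * the powernv/pseries callers but are illustrative, not verbatim):
 * the backend supplies open_win/paste_addr/close_win callbacks and a
 * device name, and this layer does the chrdev/class/device plumbing:
 *
 *	static const struct vas_user_win_ops vops = {
 *		.open_win	= plat_vas_win_open,
 *		.paste_addr	= plat_vas_paste_addr,
 *		.close_win	= plat_vas_win_close,
 *	};
 *
 *	rc = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
 *				     "nx-gzip", &vops);
 */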

void vas_unregister_coproc_api(void)
{
	dev_t devno;

	cdev_del(&coproc_device.cdev);
	devno = MKDEV(MAJOR(coproc_device.devt), 0);
	device_destroy(coproc_device.class, devno);

	class_destroy(coproc_device.class);
	unregister_chrdev_region(coproc_device.devt, 1);
}