// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
    int rc;
    unsigned long flags;

    spin_lock_irqsave(&cd->file_lock, flags);
    rc = list_empty(&cd->file_list);
    spin_unlock_irqrestore(&cd->file_lock, flags);
    return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
    unsigned long flags;

    cfile->opener = get_pid(task_tgid(current));
    spin_lock_irqsave(&cd->file_lock, flags);
    list_add(&cfile->list, &cd->file_list);
    spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
    unsigned long flags;

    spin_lock_irqsave(&cd->file_lock, flags);
    list_del(&cfile->list);
    spin_unlock_irqrestore(&cd->file_lock, flags);
    put_pid(cfile->opener);

    return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
    unsigned long flags;

    spin_lock_irqsave(&cfile->pin_lock, flags);
    list_add(&m->pin_list, &cfile->pin_list);
    spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
    unsigned long flags;

    spin_lock_irqsave(&cfile->pin_lock, flags);
    list_del(&m->pin_list);
    spin_unlock_irqrestore(&cfile->pin_lock, flags);

    return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:  Descriptor of opened file
 * @u_addr: User virtual address
 * @size:   Size of buffer
 * @virt_addr:  Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
                        unsigned long u_addr,
                        unsigned int size,
                        void **virt_addr)
{
    unsigned long flags;
    struct dma_mapping *m;

    spin_lock_irqsave(&cfile->pin_lock, flags);

    list_for_each_entry(m, &cfile->pin_list, pin_list) {
        if ((((u64)m->u_vaddr) <= (u_addr)) &&
            (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

            if (virt_addr)
                *virt_addr = m->k_vaddr +
                    (u_addr - (u64)m->u_vaddr);

            spin_unlock_irqrestore(&cfile->pin_lock, flags);
            return m;
        }
    }
    spin_unlock_irqrestore(&cfile->pin_lock, flags);
    return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
                  struct dma_mapping *dma_map)
{
    unsigned long flags;

    spin_lock_irqsave(&cfile->map_lock, flags);
    list_add(&dma_map->card_list, &cfile->map_list);
    spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
                  struct dma_mapping *dma_map)
{
    unsigned long flags;

    spin_lock_irqsave(&cfile->map_lock, flags);
    list_del(&dma_map->card_list);
    spin_unlock_irqrestore(&cfile->map_lock, flags);
}

/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:  descriptor of opened file
 * @u_addr: user virtual address
 * @size:   size of buffer
 * @dma_addr:   DMA address to be updated
 * @virt_addr:  Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
                           unsigned long u_addr,
                           unsigned int size,
                           dma_addr_t *dma_addr,
                           void **virt_addr)
{
    unsigned long flags;
    struct dma_mapping *m;
    struct pci_dev *pci_dev = cfile->cd->pci_dev;

    spin_lock_irqsave(&cfile->map_lock, flags);
    list_for_each_entry(m, &cfile->map_list, card_list) {

        if ((((u64)m->u_vaddr) <= (u_addr)) &&
            (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

            /* match found: current is as expected and
               addr is in range */
            if (dma_addr)
                *dma_addr = m->dma_addr +
                    (u_addr - (u64)m->u_vaddr);

            if (virt_addr)
                *virt_addr = m->k_vaddr +
                    (u_addr - (u64)m->u_vaddr);

            spin_unlock_irqrestore(&cfile->map_lock, flags);
            return m;
        }
    }
    spin_unlock_irqrestore(&cfile->map_lock, flags);

    dev_err(&pci_dev->dev,
        "[%s] Entry not found: u_addr=%lx, size=%x\n",
        __func__, u_addr, size);

    return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
    int i = 0;
    struct list_head *node, *next;
    struct dma_mapping *dma_map;
    struct genwqe_dev *cd = cfile->cd;
    struct pci_dev *pci_dev = cfile->cd->pci_dev;

    list_for_each_safe(node, next, &cfile->map_list) {
        dma_map = list_entry(node, struct dma_mapping, card_list);

        list_del_init(&dma_map->card_list);

        /*
         * This is really a bug, because those things should
         * already have been tidied up.
         *
         * GENWQE_MAPPING_RAW should have been removed via munmap().
         * GENWQE_MAPPING_SGL_TEMP should be removed by tidy-up code.
         */
        dev_err(&pci_dev->dev,
            "[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
            __func__, i++, dma_map->u_vaddr,
            (unsigned long)dma_map->k_vaddr,
            (unsigned long)dma_map->dma_addr);

        if (dma_map->type == GENWQE_MAPPING_RAW) {
            /* we allocated this dynamically */
            __genwqe_free_consistent(cd, dma_map->size,
                        dma_map->k_vaddr,
                        dma_map->dma_addr);
            kfree(dma_map);
        } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
            /* we use dma_map statically from the request */
            genwqe_user_vunmap(cd, dma_map);
        }
    }
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
    struct list_head *node, *next;
    struct dma_mapping *dma_map;
    struct genwqe_dev *cd = cfile->cd;

    list_for_each_safe(node, next, &cfile->pin_list) {
        dma_map = list_entry(node, struct dma_mapping, pin_list);

        /*
         * This is not a bug, because a killed process might
         * not call the unpin ioctl, which is supposed to free
         * the resources.
         *
         * Pinnings are dynamically allocated and need to be
         * deleted.
         */
        list_del_init(&dma_map->pin_list);
        genwqe_user_vunmap(cd, dma_map);
        kfree(dma_map);
    }
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 * @cd:  GenWQE device information
 * @sig: Signal to send out
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
    unsigned int files = 0;
    unsigned long flags;
    struct genwqe_file *cfile;

    spin_lock_irqsave(&cd->file_lock, flags);
    list_for_each_entry(cfile, &cd->file_list, list) {
        if (cfile->async_queue)
            kill_fasync(&cfile->async_queue, sig, POLL_HUP);
        files++;
    }
    spin_unlock_irqrestore(&cd->file_lock, flags);
    return files;
}

static int genwqe_terminate(struct genwqe_dev *cd)
{
    unsigned int files = 0;
    unsigned long flags;
    struct genwqe_file *cfile;

    spin_lock_irqsave(&cd->file_lock, flags);
    list_for_each_entry(cfile, &cd->file_list, list) {
        kill_pid(cfile->opener, SIGKILL, 1);
        files++;
    }
    spin_unlock_irqrestore(&cd->file_lock, flags);
    return files;
}

/**
 * genwqe_open() - file open
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe", ...).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
    struct genwqe_dev *cd;
    struct genwqe_file *cfile;

    cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
    if (cfile == NULL)
        return -ENOMEM;

    cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
    cfile->cd = cd;
    cfile->filp = filp;
    cfile->client = NULL;

    spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
    INIT_LIST_HEAD(&cfile->map_list);

    spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
    INIT_LIST_HEAD(&cfile->pin_list);

    filp->private_data = cfile;

    genwqe_add_file(cd, cfile);
    return 0;
}
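
/*
 * Example (userspace, illustrative): opening the character device that
 * genwqe_device_create() registers below. The node name follows the
 * GENWQE_DEVNAME "%u_card" pattern used there; "/dev/genwqe0_card"
 * assumes card index 0.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *
 *   if (fd < 0)
 *           perror("open genwqe");
 */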

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:        file descriptor
 * @filp:      file handle
 * @mode:      file mode
 *
 * Sending a signal works as follows:
 *
 * if (cdev->async_queue)
 *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
    struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

    return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
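
/*
 * Example (userspace, illustrative): registering for the SIGIO delivery
 * described above. A minimal sketch using standard fcntl() calls; error
 * handling is omitted.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   fcntl(fd, F_SETOWN, getpid());  // route SIGIO to this process
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);  // calls genwqe_fasync()
 */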

/**
 * genwqe_release() - file close
 * @inode:      file system information
 * @filp:       file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'.
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
    struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
    struct genwqe_dev *cd = cfile->cd;

    /* there must be no entries in these lists! */
    genwqe_remove_mappings(cfile);
    genwqe_remove_pinnings(cfile);

    /* remove this filp from the asynchronously notified filps */
    genwqe_fasync(-1, filp, 0);

    /*
     * For this to work we must not release cd while this cfile is
     * not yet released; otherwise the list entry would be invalid,
     * because the list itself gets reinstantiated!
     */
    genwqe_del_file(cd, cfile);
    kfree(cfile);
    return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
    /* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time the VMA is unmapped
 * @vma: VMA area to close
 *
 * Free memory which was allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
    unsigned long vsize = vma->vm_end - vma->vm_start;
    struct inode *inode = file_inode(vma->vm_file);
    struct dma_mapping *dma_map;
    struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
                        cdev_genwqe);
    struct pci_dev *pci_dev = cd->pci_dev;
    dma_addr_t d_addr = 0;
    struct genwqe_file *cfile = vma->vm_private_data;

    dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
                     &d_addr, NULL);
    if (dma_map == NULL) {
        dev_err(&pci_dev->dev,
            "  [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
            __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
            vsize);
        return;
    }
    __genwqe_del_mapping(cfile, dma_map);
    __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
                 dma_map->dma_addr);
    kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
    .open   = genwqe_vma_open,
    .close  = genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp:   File pointer (unused)
 * @vma:    VMA area to map
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we look up our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int rc;
    unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
    struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
    struct genwqe_dev *cd = cfile->cd;
    struct dma_mapping *dma_map;

    if (vsize == 0)
        return -EINVAL;

    if (get_order(vsize) > MAX_ORDER)
        return -ENOMEM;

    dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
    if (dma_map == NULL)
        return -ENOMEM;

    genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
    dma_map->u_vaddr = (void *)vma->vm_start;
    dma_map->size = vsize;
    dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
    dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
                             &dma_map->dma_addr);
    if (dma_map->k_vaddr == NULL) {
        rc = -ENOMEM;
        goto free_dma_map;
    }

    if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
        *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

    pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
    rc = remap_pfn_range(vma,
                 vma->vm_start,
                 pfn,
                 vsize,
                 vma->vm_page_prot);
    if (rc != 0) {
        rc = -EFAULT;
        goto free_dma_mem;
    }

    vma->vm_private_data = cfile;
    vma->vm_ops = &genwqe_vma_ops;
    __genwqe_add_mapping(cfile, dma_map);

    return 0;

 free_dma_mem:
    __genwqe_free_consistent(cd, dma_map->size,
                dma_map->k_vaddr,
                dma_map->dma_addr);
 free_dma_map:
    kfree(dma_map);
    return rc;
}
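
/*
 * Example (userspace, illustrative): allocating a DMA-capable buffer
 * through the mmap() path above. A minimal sketch, assuming 'fd' is an
 * open GenWQE file descriptor; munmap() triggers genwqe_vma_close().
 *
 *   #include <sys/mman.h>
 *
 *   size_t len = 2 * 1024 * 1024;
 *   void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    fd, 0);
 *
 *   if (buf != MAP_FAILED) {
 *           // ... use buf as source or sink of DDCB transfers ...
 *           munmap(buf, len);
 *   }
 */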

#define FLASH_BLOCK 0x40000 /* we use 256k blocks */

/**
 * do_flash_update() - Execute flash update (write image or VPD)
 * @cfile:  Descriptor of opened file
 * @load:   Details about the image load
 *
 * Return: 0 if successful
 */
static int do_flash_update(struct genwqe_file *cfile,
               struct genwqe_bitstream *load)
{
    int rc = 0;
    int blocks_to_flash;
    dma_addr_t dma_addr;
    u64 flash = 0;
    size_t tocopy = 0;
    u8 __user *buf;
    u8 *xbuf;
    u32 crc;
    u8 cmdopts;
    struct genwqe_dev *cd = cfile->cd;
    struct file *filp = cfile->filp;
    struct pci_dev *pci_dev = cd->pci_dev;

    if ((load->size & 0x3) != 0)
        return -EINVAL;

    if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
        return -EINVAL;

    /* FIXME Bits have changed for new service layer! */
    switch ((char)load->partition) {
    case '0':
        cmdopts = 0x14;
        break;      /* download/erase_first/part_0 */
    case '1':
        cmdopts = 0x1C;
        break;      /* download/erase_first/part_1 */
    case 'v':
        cmdopts = 0x0C;
        break;      /* download/erase_first/vpd */
    default:
        return -EINVAL;
    }

    buf = (u8 __user *)load->data_addr;
    xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
    if (xbuf == NULL)
        return -ENOMEM;

    blocks_to_flash = load->size / FLASH_BLOCK;
    while (load->size) {
        struct genwqe_ddcb_cmd *req;

        /*
         * We must be 4-byte aligned. The buffer must be zero-padded
         * so the CRC is calculated over defined values.
         */
        tocopy = min_t(size_t, load->size, FLASH_BLOCK);

        rc = copy_from_user(xbuf, buf, tocopy);
        if (rc) {
            rc = -EFAULT;
            goto free_buffer;
        }
        crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

        dev_dbg(&pci_dev->dev,
            "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
            __func__, (unsigned long)dma_addr, crc, tocopy,
            blocks_to_flash);

        /* prepare DDCB for SLU process */
        req = ddcb_requ_alloc();
        if (req == NULL) {
            rc = -ENOMEM;
            goto free_buffer;
        }

        req->cmd = SLCMD_MOVE_FLASH;
        req->cmdopts = cmdopts;

        /* prepare invariant values */
        if (genwqe_get_slu_id(cd) <= 0x2) {
            *(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
            *(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
            *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
            *(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
            req->__asiv[24] = load->uid;
            *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

            /* for simulation only */
            *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
            *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
            req->asiv_length = 32; /* bytes included in crc calc */
        } else {    /* setup DDCB for ATS architecture */
            *(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
            *(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
            *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
            *(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
            *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
            *(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

            /* for simulation only */
            *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
            *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

            /* Rd only */
            req->ats = 0x4ULL << 44;
            req->asiv_length = 40; /* bytes included in crc calc */
        }
        req->asv_length  = 8;

        /* For Genwqe5 we get back the calculated CRC */
        *(u64 *)&req->asv[0] = 0ULL;            /* 0x80 */

        rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

        load->retc = req->retc;
        load->attn = req->attn;
        load->progress = req->progress;

        if (rc < 0) {
            ddcb_requ_free(req);
            goto free_buffer;
        }

        if (req->retc != DDCB_RETC_COMPLETE) {
            rc = -EIO;
            ddcb_requ_free(req);
            goto free_buffer;
        }

        load->size  -= tocopy;
        flash += tocopy;
        buf += tocopy;
        blocks_to_flash--;
        ddcb_requ_free(req);
    }

 free_buffer:
    __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
    return rc;
}
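
/*
 * Example (userspace, illustrative): driving do_flash_update() through
 * the GENWQE_SLU_UPDATE ioctl. A hedged sketch only: 'image' and
 * 'image_size' are placeholders for a page-aligned buffer whose size is
 * a multiple of 4 bytes; the field names follow struct genwqe_bitstream
 * as used above, and uid 0 is an assumption.
 *
 *   struct genwqe_bitstream load = { 0 };
 *
 *   load.data_addr = (unsigned long)image;  // must be page aligned
 *   load.size      = image_size;            // multiple of 4 bytes
 *   load.partition = '0';                   // see the switch above
 *   load.uid       = 0;                     // assumption
 *
 *   if (ioctl(fd, GENWQE_SLU_UPDATE, &load) == 0)
 *           printf("retc=%x attn=%x progress=%x\n",
 *                  load.retc, load.attn, load.progress);
 */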

static int do_flash_read(struct genwqe_file *cfile,
             struct genwqe_bitstream *load)
{
    int rc, blocks_to_flash;
    dma_addr_t dma_addr;
    u64 flash = 0;
    size_t tocopy = 0;
    u8 __user *buf;
    u8 *xbuf;
    u8 cmdopts;
    struct genwqe_dev *cd = cfile->cd;
    struct file *filp = cfile->filp;
    struct pci_dev *pci_dev = cd->pci_dev;
    struct genwqe_ddcb_cmd *cmd;

    if ((load->size & 0x3) != 0)
        return -EINVAL;

    if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
        return -EINVAL;

    /* FIXME Bits have changed for new service layer! */
    switch ((char)load->partition) {
    case '0':
        cmdopts = 0x12;
        break;      /* upload/part_0 */
    case '1':
        cmdopts = 0x1A;
        break;      /* upload/part_1 */
    case 'v':
        cmdopts = 0x0A;
        break;      /* upload/vpd */
    default:
        return -EINVAL;
    }

    buf = (u8 __user *)load->data_addr;
    xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
    if (xbuf == NULL)
        return -ENOMEM;

    blocks_to_flash = load->size / FLASH_BLOCK;
    while (load->size) {
        /*
         * We must be 4-byte aligned. The buffer must be zero-padded
         * so the CRC is calculated over defined values.
         */
        tocopy = min_t(size_t, load->size, FLASH_BLOCK);

        dev_dbg(&pci_dev->dev,
            "[%s] DMA: %lx SZ: %ld %d\n",
            __func__, (unsigned long)dma_addr, tocopy,
            blocks_to_flash);

        /* prepare DDCB for SLU process */
        cmd = ddcb_requ_alloc();
        if (cmd == NULL) {
            rc = -ENOMEM;
            goto free_buffer;
        }
        cmd->cmd = SLCMD_MOVE_FLASH;
        cmd->cmdopts = cmdopts;

        /* prepare invariant values */
        if (genwqe_get_slu_id(cd) <= 0x2) {
            *(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
            *(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
            *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
            *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
            cmd->__asiv[24] = load->uid;
            *(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
            cmd->asiv_length = 32; /* bytes included in crc calc */
        } else {    /* setup DDCB for ATS architecture */
            *(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
            *(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
            *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
            *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
            *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
            *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

            /* rd/wr */
            cmd->ats = 0x5ULL << 44;
            cmd->asiv_length = 40; /* bytes included in crc calc */
        }
        cmd->asv_length  = 8;

        /* we only get back the calculated CRC */
        *(u64 *)&cmd->asv[0] = 0ULL;    /* 0x80 */

        rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

        load->retc = cmd->retc;
        load->attn = cmd->attn;
        load->progress = cmd->progress;

        if ((rc < 0) && (rc != -EBADMSG)) {
            ddcb_requ_free(cmd);
            goto free_buffer;
        }

        rc = copy_to_user(buf, xbuf, tocopy);
        if (rc) {
            rc = -EFAULT;
            ddcb_requ_free(cmd);
            goto free_buffer;
        }

        /* We know that we can get retc 0x104 with CRC err */
        if (((cmd->retc == DDCB_RETC_FAULT) &&
             (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
            ((cmd->retc == DDCB_RETC_COMPLETE) &&
             (cmd->attn != 0x00))) {  /* Everything was fine */
            rc = -EIO;
            ddcb_requ_free(cmd);
            goto free_buffer;
        }

        load->size  -= tocopy;
        flash += tocopy;
        buf += tocopy;
        blocks_to_flash--;
        ddcb_requ_free(cmd);
    }
    rc = 0;

 free_buffer:
    __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
    return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
    int rc;
    struct genwqe_dev *cd = cfile->cd;
    struct pci_dev *pci_dev = cfile->cd->pci_dev;
    struct dma_mapping *dma_map;
    unsigned long map_addr;
    unsigned long map_size;

    if ((m->addr == 0x0) || (m->size == 0))
        return -EINVAL;
    if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
        return -EINVAL;

    map_addr = (m->addr & PAGE_MASK);
    map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

    dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
    if (dma_map == NULL)
        return -ENOMEM;

    genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
    rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
    if (rc != 0) {
        dev_err(&pci_dev->dev,
            "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
        kfree(dma_map);
        return rc;
    }

    genwqe_add_pin(cfile, dma_map);
    return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
    struct genwqe_dev *cd = cfile->cd;
    struct dma_mapping *dma_map;
    unsigned long map_addr;
    unsigned long map_size;

    if (m->addr == 0x0)
        return -EINVAL;

    map_addr = (m->addr & PAGE_MASK);
    map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

    dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
    if (dma_map == NULL)
        return -ENOENT;

    genwqe_del_pin(cfile, dma_map);
    genwqe_user_vunmap(cd, dma_map);
    kfree(dma_map);
    return 0;
}
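
/*
 * Example (userspace, illustrative): pre-pinning a buffer so that later
 * DDCB executions can reuse the mapping via genwqe_search_pin() instead
 * of mapping it per request. A sketch; 'buf' and 'len' are placeholders.
 *
 *   struct genwqe_mem m = { 0 };
 *
 *   m.addr = (unsigned long)buf;
 *   m.size = len;
 *
 *   if (ioctl(fd, GENWQE_PIN_MEM, &m) == 0) {
 *           // ... issue DDCBs referencing buf ...
 *           ioctl(fd, GENWQE_UNPIN_MEM, &m);
 *   }
 */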

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile:  Descriptor of opened file
 * @req:    DDCB work request
 *
 * Removes the fixup entries only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
    unsigned int i;
    struct dma_mapping *dma_map;
    struct genwqe_dev *cd = cfile->cd;

    for (i = 0; i < DDCB_FIXUPS; i++) {
        dma_map = &req->dma_mappings[i];

        if (dma_mapping_used(dma_map)) {
            __genwqe_del_mapping(cfile, dma_map);
            genwqe_user_vunmap(cd, dma_map);
        }
        if (req->sgls[i].sgl != NULL)
            genwqe_free_sync_sgl(cd, &req->sgls[i]);
    }
    return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 * @cfile:  Descriptor of opened file
 * @req:    DDCB work request
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
    int rc;
    unsigned int asiv_offs, i;
    struct genwqe_dev *cd = cfile->cd;
    struct genwqe_ddcb_cmd *cmd = &req->cmd;
    struct dma_mapping *m;

    for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
         i++, asiv_offs += 0x08) {

        u64 u_addr;
        dma_addr_t d_addr;
        u32 u_size = 0;
        u64 ats_flags;

        ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

        switch (ats_flags) {

        case ATS_TYPE_DATA:
            break;  /* nothing to do here */

        case ATS_TYPE_FLAT_RDWR:
        case ATS_TYPE_FLAT_RD: {
            u_addr = be64_to_cpu(*((__be64 *)&cmd->
                           asiv[asiv_offs]));
            u_size = be32_to_cpu(*((__be32 *)&cmd->
                           asiv[asiv_offs + 0x08]));

            /*
             * No data available. Ignore u_addr in this
             * case and set addr to 0. Hardware must not
             * fetch the buffer.
             */
            if (u_size == 0x0) {
                *((__be64 *)&cmd->asiv[asiv_offs]) =
                    cpu_to_be64(0x0);
                break;
            }

            m = __genwqe_search_mapping(cfile, u_addr, u_size,
                           &d_addr, NULL);
            if (m == NULL) {
                rc = -EFAULT;
                goto err_out;
            }

            *((__be64 *)&cmd->asiv[asiv_offs]) =
                cpu_to_be64(d_addr);
            break;
        }

        case ATS_TYPE_SGL_RDWR:
        case ATS_TYPE_SGL_RD: {
            int page_offs;

            u_addr = be64_to_cpu(*((__be64 *)
                           &cmd->asiv[asiv_offs]));
            u_size = be32_to_cpu(*((__be32 *)
                           &cmd->asiv[asiv_offs + 0x08]));

            /*
             * No data available. Ignore u_addr in this
             * case and set addr to 0. Hardware must not
             * fetch the empty sgl.
             */
            if (u_size == 0x0) {
                *((__be64 *)&cmd->asiv[asiv_offs]) =
                    cpu_to_be64(0x0);
                break;
            }

            m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
            if (m != NULL) {
                page_offs = (u_addr -
                         (u64)m->u_vaddr)/PAGE_SIZE;
            } else {
                m = &req->dma_mappings[i];

                genwqe_mapping_init(m,
                            GENWQE_MAPPING_SGL_TEMP);

                if (ats_flags == ATS_TYPE_SGL_RD)
                    m->write = 0;

                rc = genwqe_user_vmap(cd, m, (void *)u_addr,
                              u_size);
                if (rc != 0)
                    goto err_out;

                __genwqe_add_mapping(cfile, m);
                page_offs = 0;
            }

            /* create genwqe style scatter gather list */
            rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
                           (void __user *)u_addr,
                           u_size, m->write);
            if (rc != 0)
                goto err_out;

            genwqe_setup_sgl(cd, &req->sgls[i],
                     &m->dma_list[page_offs]);

            *((__be64 *)&cmd->asiv[asiv_offs]) =
                cpu_to_be64(req->sgls[i].sgl_dma_addr);

            break;
        }
        default:
            rc = -EINVAL;
            goto err_out;
        }
    }
    return 0;

 err_out:
    ddcb_cmd_cleanup(cfile, req);
    return rc;
}
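
/*
 * Example (illustrative): how the 'ats' word above is decoded. Each
 * 8-byte ASIV slot (offsets 0x00..0x58) is described by one 4-bit type
 * nibble in cmd->ats, which ATS_GET_FLAGS() extracts. Consistent with
 * do_flash_update() above, where 'req->ats = 0x4ULL << 44' marks the
 * slot at ASIV offset 0x00 as a read-only flat buffer:
 *
 *   u64 ats = 0x4ULL << 44;
 *
 *   ATS_GET_FLAGS(ats, 0x00);  // ATS_TYPE_FLAT_RD: flat buffer, rd only
 *   ATS_GET_FLAGS(ats, 0x08);  // ATS_TYPE_DATA: plain immediate data
 */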

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 * @cfile:  Descriptor of opened file
 * @cmd:    Command identifier (passed from user)
 *
 * The code will build up the translation tables or look up the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
                   struct genwqe_ddcb_cmd *cmd)
{
    int rc;
    struct genwqe_dev *cd = cfile->cd;
    struct file *filp = cfile->filp;
    struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

    rc = ddcb_cmd_fixups(cfile, req);
    if (rc != 0)
        return rc;

    rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
    ddcb_cmd_cleanup(cfile, req);
    return rc;
}
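
/*
 * Example (userspace, illustrative): submitting a work request through
 * the GENWQE_EXECUTE_DDCB ioctl, which lands in genwqe_execute_ddcb().
 * A hedged sketch: 'MY_APP_CMD', the length values and the ASIV layout
 * are hypothetical and application specific; 'buf' is assumed to come
 * from the mmap() example above so the flat-buffer fixup can resolve it.
 *
 *   #include <endian.h>
 *
 *   struct genwqe_ddcb_cmd cmd = { 0 };
 *
 *   cmd.cmd = MY_APP_CMD;                    // hypothetical accelerator cmd
 *   cmd.asiv_length = 16;                    // hypothetical
 *   cmd.asv_length  = 8;                     // hypothetical
 *   cmd.ats = 0x4ULL << 44;                  // slot 0: flat buffer, rd only
 *   *(uint64_t *)&cmd.asiv[0] = htobe64((unsigned long)buf);
 *   *(uint32_t *)&cmd.asiv[8] = htobe32(len);
 *
 *   if (ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd) == 0 &&
 *       cmd.retc == DDCB_RETC_COMPLETE) {
 *           // ... result, if any, is in cmd.asv[] ...
 *   }
 */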

static int do_execute_ddcb(struct genwqe_file *cfile,
               unsigned long arg, int raw)
{
    int rc;
    struct genwqe_ddcb_cmd *cmd;
    struct genwqe_dev *cd = cfile->cd;
    struct file *filp = cfile->filp;

    cmd = ddcb_requ_alloc();
    if (cmd == NULL)
        return -ENOMEM;

    if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
        ddcb_requ_free(cmd);
        return -EFAULT;
    }

    if (!raw)
        rc = genwqe_execute_ddcb(cfile, cmd);
    else
        rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

    /*
     * Copy back only the modified fields. Do not copy the ASIV
     * back, since that part of the copy was modified by the driver.
     */
    if (copy_to_user((void __user *)arg, cmd,
             sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
        ddcb_requ_free(cmd);
        return -EFAULT;
    }

    ddcb_requ_free(cmd);
    return rc;
}

/**
 * genwqe_ioctl() - IO control
 * @filp:       file handle
 * @cmd:        command identifier (passed from user)
 * @arg:        argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
             unsigned long arg)
{
    int rc = 0;
    struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
    struct genwqe_dev *cd = cfile->cd;
    struct pci_dev *pci_dev = cd->pci_dev;
    struct genwqe_reg_io __user *io;
    u64 val;
    u32 reg_offs;

    /* Return -EIO if card hit EEH */
    if (pci_channel_offline(pci_dev))
        return -EIO;

    if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
        return -EINVAL;

    switch (cmd) {

    case GENWQE_GET_CARD_STATE:
        put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
        return 0;

        /* Register access */
    case GENWQE_READ_REG64: {
        io = (struct genwqe_reg_io __user *)arg;

        if (get_user(reg_offs, &io->num))
            return -EFAULT;

        if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
            return -EINVAL;

        val = __genwqe_readq(cd, reg_offs);
        put_user(val, &io->val64);
        return 0;
    }

    case GENWQE_WRITE_REG64: {
        io = (struct genwqe_reg_io __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;

        if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
            return -EPERM;

        if (get_user(reg_offs, &io->num))
            return -EFAULT;

        if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
            return -EINVAL;

        if (get_user(val, &io->val64))
            return -EFAULT;

        __genwqe_writeq(cd, reg_offs, val);
        return 0;
    }

    case GENWQE_READ_REG32: {
        io = (struct genwqe_reg_io __user *)arg;

        if (get_user(reg_offs, &io->num))
            return -EFAULT;

        if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
            return -EINVAL;

        val = __genwqe_readl(cd, reg_offs);
        put_user(val, &io->val64);
        return 0;
    }

    case GENWQE_WRITE_REG32: {
        io = (struct genwqe_reg_io __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;

        if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
            return -EPERM;

        if (get_user(reg_offs, &io->num))
            return -EFAULT;

        if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
            return -EINVAL;

        if (get_user(val, &io->val64))
            return -EFAULT;

        __genwqe_writel(cd, reg_offs, val);
        return 0;
    }

        /* Flash update/reading */
    case GENWQE_SLU_UPDATE: {
        struct genwqe_bitstream load;

        if (!genwqe_is_privileged(cd))
            return -EPERM;

        if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
            return -EPERM;

        if (copy_from_user(&load, (void __user *)arg,
                   sizeof(load)))
            return -EFAULT;

        rc = do_flash_update(cfile, &load);

        if (copy_to_user((void __user *)arg, &load, sizeof(load)))
            return -EFAULT;

        return rc;
    }

    case GENWQE_SLU_READ: {
        struct genwqe_bitstream load;

        if (!genwqe_is_privileged(cd))
            return -EPERM;

        if (genwqe_flash_readback_fails(cd))
            return -ENOSPC;  /* known to fail for old versions */

        if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
            return -EFAULT;

        rc = do_flash_read(cfile, &load);

        if (copy_to_user((void __user *)arg, &load, sizeof(load)))
            return -EFAULT;

        return rc;
    }

        /* memory pinning and unpinning */
    case GENWQE_PIN_MEM: {
        struct genwqe_mem m;

        if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
            return -EFAULT;

        return genwqe_pin_mem(cfile, &m);
    }

    case GENWQE_UNPIN_MEM: {
        struct genwqe_mem m;

        if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
            return -EFAULT;

        return genwqe_unpin_mem(cfile, &m);
    }

        /* launch a DDCB and wait for completion */
    case GENWQE_EXECUTE_DDCB:
        return do_execute_ddcb(cfile, arg, 0);

    case GENWQE_EXECUTE_RAW_DDCB: {

        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;

        return do_execute_ddcb(cfile, arg, 1);
    }

    default:
        return -EINVAL;
    }

    return rc;
}
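
/*
 * Example (userspace, illustrative): reading a 64-bit register through
 * the GENWQE_READ_REG64 case above. Offset 0x0 is a placeholder; real
 * offsets are hardware specific and must be 8-byte aligned and below
 * mmio_len.
 *
 *   struct genwqe_reg_io io = { 0 };
 *
 *   io.num = 0x0;  // register offset, 8-byte aligned
 *
 *   if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
 *           printf("reg[0x%llx] = 0x%llx\n",
 *                  (unsigned long long)io.num,
 *                  (unsigned long long)io.val64);
 */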

static const struct file_operations genwqe_fops = {
    .owner      = THIS_MODULE,
    .open       = genwqe_open,
    .fasync     = genwqe_fasync,
    .mmap       = genwqe_mmap,
    .unlocked_ioctl = genwqe_ioctl,
    .compat_ioctl   = compat_ptr_ioctl,
    .release    = genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
    return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
    int rc;
    struct pci_dev *pci_dev = cd->pci_dev;

    /*
     * Here starts the individual setup per client. It must
     * initialize its own cdev data structure with its own fops.
     * The appropriate devnum needs to be created. The ranges must
     * not overlap.
     */
    rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
                 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
    if (rc < 0) {
        dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
        goto err_dev;
    }

    cdev_init(&cd->cdev_genwqe, &genwqe_fops);
    cd->cdev_genwqe.owner = THIS_MODULE;

    rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
    if (rc < 0) {
        dev_err(&pci_dev->dev, "err: cdev_add failed\n");
        goto err_add;
    }

    /*
     * Finally the device in /dev/... must be created. The rule is
     * to use card%d_clientname for each created device.
     */
    cd->dev = device_create_with_groups(cd->class_genwqe,
                        &cd->pci_dev->dev,
                        cd->devnum_genwqe, cd,
                        genwqe_attribute_groups,
                        GENWQE_DEVNAME "%u_card",
                        cd->card_idx);
    if (IS_ERR(cd->dev)) {
        rc = PTR_ERR(cd->dev);
        goto err_cdev;
    }

    genwqe_init_debugfs(cd);

    return 0;

 err_cdev:
    cdev_del(&cd->cdev_genwqe);
 err_add:
    unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
    cd->dev = NULL;
    return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
    int rc;
    unsigned int i;
    struct pci_dev *pci_dev = cd->pci_dev;

    if (!genwqe_open_files(cd))
        return 0;

    dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

    rc = genwqe_kill_fasync(cd, SIGIO);
    if (rc > 0) {
        /* give kill_timeout seconds to close file descriptors ... */
        for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
                 genwqe_open_files(cd); i++) {
            dev_info(&pci_dev->dev, "  %d sec ...", i);

            cond_resched();
            msleep(1000);
        }

        /* if no open files we can safely continue, else ... */
        if (!genwqe_open_files(cd))
            return 0;

        dev_warn(&pci_dev->dev,
             "[%s] send SIGKILL and wait ...\n", __func__);

        rc = genwqe_terminate(cd);
        if (rc) {
            /* Give kill_timeout more seconds to end processes */
            for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
                     genwqe_open_files(cd); i++) {
                dev_warn(&pci_dev->dev, "  %d sec ...", i);

                cond_resched();
                msleep(1000);
            }
        }
    }
    return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 * @cd: GenWQE device information
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
    int rc;
    struct pci_dev *pci_dev = cd->pci_dev;

    if (!genwqe_device_initialized(cd))
        return 1;

    genwqe_inform_and_stop_processes(cd);

    /*
     * We currently do wait until all file descriptors are
     * closed. This leads to a problem when we abort an
     * application, which then drops this reference from
     * 1/unused to 0/illegal instead of from 2/used to 1/empty.
     */
    rc = kref_read(&cd->cdev_genwqe.kobj.kref);
    if (rc != 1) {
        dev_err(&pci_dev->dev,
            "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
        panic("Fatal err: cannot free resources with pending references!");
    }

    genqwe_exit_debugfs(cd);
    device_destroy(cd->class_genwqe, cd->devnum_genwqe);
    cdev_del(&cd->cdev_genwqe);
    unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
    cd->dev = NULL;

    return 0;
}