0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  Adaptec AAC series RAID controller driver
0004  *  (c) Copyright 2001 Red Hat Inc.
0005  *
0006  * based on the old aacraid driver that is..
0007  * Adaptec aacraid device driver for Linux.
0008  *
0009  * Copyright (c) 2000-2010 Adaptec, Inc.
0010  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
0011  *       2016-2017 Microsemi Corp. (aacraid@microsemi.com)
0012  *
0013  * Module Name:
0014  *  commctrl.c
0015  *
0016  * Abstract: Contains all routines for control of the AFA comm layer
0017  */
0018 
0019 #include <linux/kernel.h>
0020 #include <linux/init.h>
0021 #include <linux/types.h>
0022 #include <linux/pci.h>
0023 #include <linux/spinlock.h>
0024 #include <linux/slab.h>
0025 #include <linux/completion.h>
0026 #include <linux/dma-mapping.h>
0027 #include <linux/blkdev.h>
0028 #include <linux/compat.h>
0029 #include <linux/delay.h> /* ssleep prototype */
0030 #include <linux/kthread.h>
0031 #include <linux/uaccess.h>
0032 #include <scsi/scsi_host.h>
0033 
0034 #include "aacraid.h"
0035 
0036 # define AAC_DEBUG_PREAMBLE KERN_INFO
0037 # define AAC_DEBUG_POSTAMBLE
0038 /**
0039  *  ioctl_send_fib  -   send a FIB from userspace
0040  *  @dev:   adapter being processed
0041  *  @arg:   arguments to the ioctl call
0042  *
0043  *  This routine sends a FIB to the adapter on behalf of a user-level
0044  *  program.
0045  */
0046 static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
0047 {
0048     struct hw_fib * kfib;
0049     struct fib *fibptr;
0050     struct hw_fib * hw_fib = (struct hw_fib *)0;
0051     dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
0052     unsigned int size, osize;
0053     int retval;
0054 
0055     if (dev->in_reset) {
0056         return -EBUSY;
0057     }
0058     fibptr = aac_fib_alloc(dev);
0059     if(fibptr == NULL) {
0060         return -ENOMEM;
0061     }
0062 
0063     kfib = fibptr->hw_fib_va;
0064     /*
0065      *  First copy in the header so that we can check the size field.
0066      */
0067     if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
0068         aac_fib_free(fibptr);
0069         return -EFAULT;
0070     }
0071     /*
0072      *  Since we copy based on the fib header size, make sure that we
0073      *  will not overrun the buffer when we copy the memory. Return
0074      *  an error if we would.
0075      */
0076     osize = size = le16_to_cpu(kfib->header.Size) +
0077         sizeof(struct aac_fibhdr);
0078     if (size < le16_to_cpu(kfib->header.SenderSize))
0079         size = le16_to_cpu(kfib->header.SenderSize);
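         /*
          * If the requested size exceeds the adapter's default FIB size,
          * fall back to a temporary coherent buffer (capped at 2048 bytes).
          */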
0080     if (size > dev->max_fib_size) {
0081         dma_addr_t daddr;
0082 
0083         if (size > 2048) {
0084             retval = -EINVAL;
0085             goto cleanup;
0086         }
0087 
0088         kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
0089                       GFP_KERNEL);
0090         if (!kfib) {
0091             retval = -ENOMEM;
0092             goto cleanup;
0093         }
0094 
0095         /* Hijack the fib's hw_fib with the larger buffer; restored in cleanup */
0096         hw_fib = fibptr->hw_fib_va;
0097         hw_fib_pa = fibptr->hw_fib_pa;
0098         fibptr->hw_fib_va = kfib;
0099         fibptr->hw_fib_pa = daddr;
0100         memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
0101         memcpy(kfib, hw_fib, dev->max_fib_size);
0102     }
0103 
0104     if (copy_from_user(kfib, arg, size)) {
0105         retval = -EFAULT;
0106         goto cleanup;
0107     }
0108 
0109     /* Sanity check the second copy: the header may have changed between the two copies */
0110     if ((osize != le16_to_cpu(kfib->header.Size) +
0111         sizeof(struct aac_fibhdr))
0112         || (size < le16_to_cpu(kfib->header.SenderSize))) {
0113         retval = -EINVAL;
0114         goto cleanup;
0115     }
0116 
0117     if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
0118         aac_adapter_interrupt(dev);
0119         /*
0120          * Since we didn't really send a fib, zero out the state to allow
0121          * cleanup code not to assert.
0122          */
0123         kfib->header.XferState = 0;
0124     } else {
0125         retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
0126                 le16_to_cpu(kfib->header.Size) , FsaNormal,
0127                 1, 1, NULL, NULL);
0128         if (retval) {
0129             goto cleanup;
0130         }
0131         if (aac_fib_complete(fibptr) != 0) {
0132             retval = -EINVAL;
0133             goto cleanup;
0134         }
0135     }
0136     /*
0137      *  Make sure that the size returned by the adapter (which includes
0138      *  the header) is less than or equal to the size of a fib, so we
0139      *  don't corrupt application data. Then copy that size to the user
0140      *  buffer. (Don't try to add the header information again, since it
0141      *  was already included by the adapter.)
0142      */
0143 
0144     retval = 0;
0145     if (copy_to_user(arg, (void *)kfib, size))
0146         retval = -EFAULT;
0147 cleanup:
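         /*
          * If the hw_fib was hijacked above, free the temporary buffer
          * and restore the fib's original mapping before freeing it.
          */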
0148     if (hw_fib) {
0149         dma_free_coherent(&dev->pdev->dev, size, kfib,
0150                   fibptr->hw_fib_pa);
0151         fibptr->hw_fib_pa = hw_fib_pa;
0152         fibptr->hw_fib_va = hw_fib;
0153     }
0154     if (retval != -ERESTARTSYS)
0155         aac_fib_free(fibptr);
0156     return retval;
0157 }
0158 
0159 /**
0160  *  open_getadapter_fib -   open an adapter fib context
0161  *  @dev:   adapter being processed
0162  *  @arg:   arguments to the open call
0163  *
0164  *  This routine allocates a new AdapterFibContext, registers it with the
0165  *  adapter and copies its unique handle back to the user.
0166  */
0167 static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
0168 {
0169     struct aac_fib_context * fibctx;
0170     int status;
0171 
0172     fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
0173     if (fibctx == NULL) {
0174         status = -ENOMEM;
0175     } else {
0176         unsigned long flags;
0177         struct list_head * entry;
0178         struct aac_fib_context * context;
0179 
0180         fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
0181         fibctx->size = sizeof(struct aac_fib_context);
0182         /*
0183          *  Yes yes, I know this could be an index, but we have a
0184          * better guarantee of uniqueness for the locked loop below.
0185          * Without the aid of a persistent history, this also helps
0186          * reduce the chance that the opaque context would be reused.
0187          */
0188         fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
0189         /*
0190          *  Initialize the completion used to wait for the next AIF.
0191          */
0192         init_completion(&fibctx->completion);
0193         fibctx->wait = 0;
0194         /*
0195          *  Initialize the fibs and set the count of fibs on
0196          *  the list to 0.
0197          */
0198         fibctx->count = 0;
0199         INIT_LIST_HEAD(&fibctx->fib_list);
0200         fibctx->jiffies = jiffies/HZ;
0201         /*
0202          *  Now add this context onto the adapter's
0203          *  AdapterFibContext list.
0204          */
0205         spin_lock_irqsave(&dev->fib_lock, flags);
0206         /* Ensure that we have a unique identifier */
0207         entry = dev->fib_list.next;
0208         while (entry != &dev->fib_list) {
0209             context = list_entry(entry, struct aac_fib_context, next);
0210             if (context->unique == fibctx->unique) {
0211                 /* Not unique (32 bits) */
0212                 fibctx->unique++;
0213                 entry = dev->fib_list.next;
0214             } else {
0215                 entry = entry->next;
0216             }
0217         }
0218         list_add_tail(&fibctx->next, &dev->fib_list);
0219         spin_unlock_irqrestore(&dev->fib_lock, flags);
0220         if (copy_to_user(arg, &fibctx->unique,
0221                         sizeof(fibctx->unique))) {
0222             status = -EFAULT;
0223         } else {
0224             status = 0;
0225         }
0226     }
0227     return status;
0228 }
0229 
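     /* 32-bit user-space layout of struct fib_ioctl, used by compat ioctl callers. */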
0230 struct compat_fib_ioctl {
0231     u32 fibctx;
0232     s32 wait;
0233     compat_uptr_t fib;
0234 };
0235 
0236 /**
0237  *  next_getadapter_fib -   get the next fib
0238  *  @dev: adapter to use
0239  *  @arg: ioctl argument
0240  *
0241  *  This routine will get the next Fib, if available, from the AdapterFibContext
0242  *  passed in from the user.
0243  */
0244 static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
0245 {
0246     struct fib_ioctl f;
0247     struct fib *fib;
0248     struct aac_fib_context *fibctx;
0249     int status;
0250     struct list_head * entry;
0251     unsigned long flags;
0252 
0253     if (in_compat_syscall()) {
0254         struct compat_fib_ioctl cf;
0255 
0256         if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
0257             return -EFAULT;
0258 
0259         f.fibctx = cf.fibctx;
0260         f.wait = cf.wait;
0261         f.fib = compat_ptr(cf.fib);
0262     } else {
0263         if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
0264             return -EFAULT;
0265     }
0266     /*
0267      *  Verify that the HANDLE passed in was a valid AdapterFibContext
0268      *
0269      *  Search the list of AdapterFibContext addresses on the adapter
0270      *  to be sure this is a valid address
0271      */
0272     spin_lock_irqsave(&dev->fib_lock, flags);
0273     entry = dev->fib_list.next;
0274     fibctx = NULL;
0275 
0276     while (entry != &dev->fib_list) {
0277         fibctx = list_entry(entry, struct aac_fib_context, next);
0278         /*
0279          *  Extract the AdapterFibContext from the Input parameters.
0280          */
0281         if (fibctx->unique == f.fibctx) { /* We found a winner */
0282             break;
0283         }
0284         entry = entry->next;
0285         fibctx = NULL;
0286     }
0287     if (!fibctx) {
0288         spin_unlock_irqrestore(&dev->fib_lock, flags);
0289         dprintk ((KERN_INFO "Fib Context not found\n"));
0290         return -EINVAL;
0291     }
0292 
0293     if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
0294          (fibctx->size != sizeof(struct aac_fib_context))) {
0295         spin_unlock_irqrestore(&dev->fib_lock, flags);
0296         dprintk ((KERN_INFO "Fib Context corrupt?\n"));
0297         return -EINVAL;
0298     }
0299     status = 0;
0300     /*
0301      *  If there are no fibs to send back, then either wait or return
0302      *  -EAGAIN
0303      */
0304 return_fib:
0305     if (!list_empty(&fibctx->fib_list)) {
0306         /*
0307          *  Pull the next fib from the fibs
0308          */
0309         entry = fibctx->fib_list.next;
0310         list_del(entry);
0311 
0312         fib = list_entry(entry, struct fib, fiblink);
0313         fibctx->count--;
0314         spin_unlock_irqrestore(&dev->fib_lock, flags);
0315         if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
0316             kfree(fib->hw_fib_va);
0317             kfree(fib);
0318             return -EFAULT;
0319         }
0320         /*
0321          *  Free the space occupied by this copy of the fib.
0322          */
0323         kfree(fib->hw_fib_va);
0324         kfree(fib);
0325         status = 0;
0326     } else {
0327         spin_unlock_irqrestore(&dev->fib_lock, flags);
0328         /* If someone killed the AIF aacraid thread, restart it */
0329         status = !dev->aif_thread;
0330         if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
0331             /* Be paranoid, be very paranoid! */
0332             kthread_stop(dev->thread);
0333             ssleep(1);
0334             dev->aif_thread = 0;
0335             dev->thread = kthread_run(aac_command_thread, dev,
0336                           "%s", dev->name);
0337             ssleep(1);
0338         }
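             /*
              * The caller asked to wait: block until the AIF thread
              * completes fibctx->completion after queuing another FIB.
              */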
0339         if (f.wait) {
0340             if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
0341                 status = -ERESTARTSYS;
0342             } else {
0343                 /* Lock again and retry */
0344                 spin_lock_irqsave(&dev->fib_lock, flags);
0345                 goto return_fib;
0346             }
0347         } else {
0348             status = -EAGAIN;
0349         }
0350     }
0351     fibctx->jiffies = jiffies/HZ;
0352     return status;
0353 }
0354 
0355 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
0356 {
0357     struct fib *fib;
0358 
0359     /*
0360      *  First free any FIBs that have not been consumed.
0361      */
0362     while (!list_empty(&fibctx->fib_list)) {
0363         struct list_head * entry;
0364         /*
0365          *  Pull the next fib from the fibs
0366          */
0367         entry = fibctx->fib_list.next;
0368         list_del(entry);
0369         fib = list_entry(entry, struct fib, fiblink);
0370         fibctx->count--;
0371         /*
0372          *  Free the space occupied by this copy of the fib.
0373          */
0374         kfree(fib->hw_fib_va);
0375         kfree(fib);
0376     }
0377     /*
0378      *  Remove the Context from the AdapterFibContext List
0379      */
0380     list_del(&fibctx->next);
0381     /*
0382      *  Invalidate context
0383      */
0384     fibctx->type = 0;
0385     /*
0386      *  Free the space occupied by the Context
0387      */
0388     kfree(fibctx);
0389     return 0;
0390 }
0391 
0392 /**
0393  *  close_getadapter_fib    -   close down user fib context
0394  *  @dev: adapter
0395  *  @arg: ioctl arguments
0396  *
0397  *  This routine will close down the fibctx passed in from the user.
0398  */
0399 
0400 static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
0401 {
0402     struct aac_fib_context *fibctx;
0403     int status;
0404     unsigned long flags;
0405     struct list_head * entry;
0406 
0407     /*
0408      *  Verify that the HANDLE passed in was a valid AdapterFibContext
0409      *
0410      *  Search the list of AdapterFibContext addresses on the adapter
0411      *  to be sure this is a valid address
0412      */
0413 
0414     entry = dev->fib_list.next;
0415     fibctx = NULL;
0416 
0417     while(entry != &dev->fib_list) {
0418         fibctx = list_entry(entry, struct aac_fib_context, next);
0419         /*
0420          *  Extract the fibctx from the input parameters
0421          */
0422         if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
0423             break;
0424         entry = entry->next;
0425         fibctx = NULL;
0426     }
0427 
0428     if (!fibctx)
0429         return 0; /* Already gone */
0430 
0431     if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
0432          (fibctx->size != sizeof(struct aac_fib_context)))
0433         return -EINVAL;
0434     spin_lock_irqsave(&dev->fib_lock, flags);
0435     status = aac_close_fib_context(dev, fibctx);
0436     spin_unlock_irqrestore(&dev->fib_lock, flags);
0437     return status;
0438 }
0439 
0440 /**
0441  *  check_revision  -   report the driver version
0442  *  @dev: adapter
0443  *  @arg: ioctl arguments
0444  *
0445  *  This routine returns the driver version.
0446  *  Under Linux, there have been no version incompatibilities, so this is
0447  *  simple!
0448  */
0449 
0450 static int check_revision(struct aac_dev *dev, void __user *arg)
0451 {
0452     struct revision response;
0453     char *driver_version = aac_driver_version;
0454     u32 version;
0455 
0456     response.compat = 1;
0457     version = (simple_strtol(driver_version,
0458                 &driver_version, 10) << 24) | 0x00000400;
0459     version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
0460     version += simple_strtol(driver_version + 1, NULL, 10);
0461     response.version = cpu_to_le32(version);
0462 #   ifdef AAC_DRIVER_BUILD
0463         response.build = cpu_to_le32(AAC_DRIVER_BUILD);
0464 #   else
0465         response.build = cpu_to_le32(9999);
0466 #   endif
0467 
0468     if (copy_to_user(arg, &response, sizeof(response)))
0469         return -EFAULT;
0470     return 0;
0471 }
0472 
0473 
0474 /**
0475  * aac_send_raw_srb() -   send a raw SRB from userspace
0476  *  @dev:   adapter being processed
0477  *  @arg:   arguments to the send call
0478  */
0479 static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
0480 {
0481     struct fib* srbfib;
0482     int status;
0483     struct aac_srb *srbcmd = NULL;
0484     struct aac_hba_cmd_req *hbacmd = NULL;
0485     struct user_aac_srb *user_srbcmd = NULL;
0486     struct user_aac_srb __user *user_srb = arg;
0487     struct aac_srb_reply __user *user_reply;
0488     u32 chn;
0489     u32 fibsize = 0;
0490     u32 flags = 0;
0491     s32 rcode = 0;
0492     u32 data_dir;
0493     void __user *sg_user[HBA_MAX_SG_EMBEDDED];
0494     void *sg_list[HBA_MAX_SG_EMBEDDED];
0495     u32 sg_count[HBA_MAX_SG_EMBEDDED];
0496     u32 sg_indx = 0;
0497     u32 byte_count = 0;
0498     u32 actual_fibsize64, actual_fibsize = 0;
0499     int i;
0500     int is_native_device;
0501     u64 address;
0502 
0503 
0504     if (dev->in_reset) {
0505         dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
0506         return -EBUSY;
0507     }
0508     if (!capable(CAP_SYS_ADMIN)){
0509         dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
0510         return -EPERM;
0511     }
0512     /*
0513      *  Allocate and initialize a Fib then setup a SRB command
0514      */
0515     if (!(srbfib = aac_fib_alloc(dev))) {
0516         return -ENOMEM;
0517     }
0518 
0519     memset(sg_list, 0, sizeof(sg_list)); /* so cleanup can safely kfree() unused entries */
0520     if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
0521         dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
0522         rcode = -EFAULT;
0523         goto cleanup;
0524     }
0525 
0526     if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
0527         (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
0528         rcode = -EINVAL;
0529         goto cleanup;
0530     }
0531 
0532     user_srbcmd = memdup_user(user_srb, fibsize);
0533     if (IS_ERR(user_srbcmd)) {
0534         rcode = PTR_ERR(user_srbcmd);
0535         user_srbcmd = NULL;
0536         goto cleanup;
0537     }
0538 
0539     flags = user_srbcmd->flags; /* from user in cpu order */
0540     switch (flags & (SRB_DataIn | SRB_DataOut)) {
0541     case SRB_DataOut:
0542         data_dir = DMA_TO_DEVICE;
0543         break;
0544     case (SRB_DataIn | SRB_DataOut):
0545         data_dir = DMA_BIDIRECTIONAL;
0546         break;
0547     case SRB_DataIn:
0548         data_dir = DMA_FROM_DEVICE;
0549         break;
0550     default:
0551         data_dir = DMA_NONE;
0552     }
0553     if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
0554         dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
0555             user_srbcmd->sg.count));
0556         rcode = -EINVAL;
0557         goto cleanup;
0558     }
0559     if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
0560         dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
0561         rcode = -EINVAL;
0562         goto cleanup;
0563     }
0564     actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
0565         ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
0566     actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
0567       (sizeof(struct sgentry64) - sizeof(struct sgentry));
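         /*
          * The fibsize supplied by the user tells us whether 32-bit or
          * 64-bit SG entries were passed; both legal sizes are computed
          * here and compared against it below.
          */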
0568     /* User made a mistake - should not continue */
0569     if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
0570         dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
0571           "Raw SRB command calculated fibsize=%lu;%lu "
0572           "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
0573           "issued fibsize=%d\n",
0574           actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
0575           sizeof(struct aac_srb), sizeof(struct sgentry),
0576           sizeof(struct sgentry64), fibsize));
0577         rcode = -EINVAL;
0578         goto cleanup;
0579     }
0580 
0581     chn = user_srbcmd->channel;
0582     if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
0583         dev->hba_map[chn][user_srbcmd->id].devtype ==
0584         AAC_DEVTYPE_NATIVE_RAW) {
0585         is_native_device = 1;
0586         hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
0587         memset(hbacmd, 0, 96);  /* clearing the full sizeof(*hbacmd) is not necessary */
0588 
0589         /* iu_type is a parameter of aac_hba_send */
0590         switch (data_dir) {
0591         case DMA_TO_DEVICE:
0592             hbacmd->byte1 = 2;
0593             break;
0594         case DMA_FROM_DEVICE:
0595         case DMA_BIDIRECTIONAL:
0596             hbacmd->byte1 = 1;
0597             break;
0598         case DMA_NONE:
0599         default:
0600             break;
0601         }
0602         hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
0603         hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
0604 
0605         /*
0606          * we fill in reply_qid later in aac_src_deliver_message
0607          * we fill in iu_type, request_id later in aac_hba_send
0608          * we fill in emb_data_desc_count, data_length later
0609          * in sg list build
0610          */
0611 
0612         memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
0613 
0614         address = (u64)srbfib->hw_error_pa;
0615         hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
0616         hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
0617         hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
0618         hbacmd->emb_data_desc_count =
0619                     cpu_to_le32(user_srbcmd->sg.count);
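             /* 64 bytes of fixed HBA command followed by the embedded SG list */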
0620         srbfib->hbacmd_size = 64 +
0621             user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
0622 
0623     } else {
0624         is_native_device = 0;
0625         aac_fib_init(srbfib);
0626 
0627         /* raw_srb FIB is not FastResponseCapable */
0628         srbfib->hw_fib_va->header.XferState &=
0629             ~cpu_to_le32(FastResponseCapable);
0630 
0631         srbcmd = (struct aac_srb *) fib_data(srbfib);
0632 
0633         // Fix up srb for endian and force some values
0634 
0635         srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
0636         srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
0637         srbcmd->id   = cpu_to_le32(user_srbcmd->id);
0638         srbcmd->lun  = cpu_to_le32(user_srbcmd->lun);
0639         srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
0640         srbcmd->flags    = cpu_to_le32(flags);
0641         srbcmd->retry_limit = 0; // Obsolete parameter
0642         srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
0643         memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
0644     }
0645 
0646     byte_count = 0;
0647     if (is_native_device) {
0648         struct user_sgmap *usg32 = &user_srbcmd->sg;
0649         struct user_sgmap64 *usg64 =
0650             (struct user_sgmap64 *)&user_srbcmd->sg;
0651 
0652         for (i = 0; i < usg32->count; i++) {
0653             void *p;
0654             u64 addr;
0655 
0656             sg_count[i] = (actual_fibsize64 == fibsize) ?
0657                 usg64->sg[i].count : usg32->sg[i].count;
0658             if (sg_count[i] >
0659                 (dev->scsi_host_ptr->max_sectors << 9)) {
0660                 pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
0661                     i, sg_count[i],
0662                     dev->scsi_host_ptr->max_sectors << 9);
0663                 rcode = -EINVAL;
0664                 goto cleanup;
0665             }
0666 
0667             p = kmalloc(sg_count[i], GFP_KERNEL);
0668             if (!p) {
0669                 rcode = -ENOMEM;
0670                 goto cleanup;
0671             }
0672 
0673             if (actual_fibsize64 == fibsize) {
0674                 addr = (u64)usg64->sg[i].addr[0];
0675                 addr += ((u64)usg64->sg[i].addr[1]) << 32;
0676             } else {
0677                 addr = (u64)usg32->sg[i].addr;
0678             }
0679 
0680             sg_user[i] = (void __user *)(uintptr_t)addr;
0681             sg_list[i] = p; // save so we can clean up later
0682             sg_indx = i;
0683 
0684             if (flags & SRB_DataOut) {
0685                 if (copy_from_user(p, sg_user[i],
0686                     sg_count[i])) {
0687                     rcode = -EFAULT;
0688                     goto cleanup;
0689                 }
0690             }
0691             addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
0692                           data_dir);
0693             hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
0694             hbacmd->sge[i].addr_lo = cpu_to_le32(
0695                         (u32)(addr & 0xffffffff));
0696             hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
0697             hbacmd->sge[i].flags = 0;
0698             byte_count += sg_count[i];
0699         }
0700 
0701         if (usg32->count > 0)   /* embedded sglist */
0702             hbacmd->sge[usg32->count-1].flags =
0703                 cpu_to_le32(0x40000000);
0704         hbacmd->data_length = cpu_to_le32(byte_count);
0705 
0706         status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
0707                     NULL, NULL);
0708 
0709     } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
0710         struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
0711         struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
0712 
0713         /*
0714          * This should also catch if user used the 32 bit sgmap
0715          */
0716         if (actual_fibsize64 == fibsize) {
0717             actual_fibsize = actual_fibsize64;
0718             for (i = 0; i < upsg->count; i++) {
0719                 u64 addr;
0720                 void* p;
0721 
0722                 sg_count[i] = upsg->sg[i].count;
0723                 if (sg_count[i] >
0724                     ((dev->adapter_info.options &
0725                      AAC_OPT_NEW_COMM) ?
0726                       (dev->scsi_host_ptr->max_sectors << 9) :
0727                       65536)) {
0728                     rcode = -EINVAL;
0729                     goto cleanup;
0730                 }
0731 
0732                 p = kmalloc(sg_count[i], GFP_KERNEL);
0733                 if(!p) {
0734                     dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
0735                       sg_count[i], i, upsg->count));
0736                     rcode = -ENOMEM;
0737                     goto cleanup;
0738                 }
0739                 addr = (u64)upsg->sg[i].addr[0];
0740                 addr += ((u64)upsg->sg[i].addr[1]) << 32;
0741                 sg_user[i] = (void __user *)(uintptr_t)addr;
0742                 sg_list[i] = p; // save so we can clean up later
0743                 sg_indx = i;
0744 
0745                 if (flags & SRB_DataOut) {
0746                     if (copy_from_user(p, sg_user[i],
0747                         sg_count[i])){
0748                         dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
0749                         rcode = -EFAULT;
0750                         goto cleanup;
0751                     }
0752                 }
0753                 addr = dma_map_single(&dev->pdev->dev, p,
0754                               sg_count[i], data_dir);
0755 
0756                 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
0757                 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
0758                 byte_count += sg_count[i];
0759                 psg->sg[i].count = cpu_to_le32(sg_count[i]);
0760             }
0761         } else {
0762             struct user_sgmap* usg;
0763             usg = kmemdup(upsg,
0764                       actual_fibsize - sizeof(struct aac_srb)
0765                       + sizeof(struct sgmap), GFP_KERNEL);
0766             if (!usg) {
0767                 dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
0768                 rcode = -ENOMEM;
0769                 goto cleanup;
0770             }
0771             actual_fibsize = actual_fibsize64;
0772 
0773             for (i = 0; i < usg->count; i++) {
0774                 u64 addr;
0775                 void* p;
0776 
0777                 sg_count[i] = usg->sg[i].count;
0778                 if (sg_count[i] >
0779                     ((dev->adapter_info.options &
0780                      AAC_OPT_NEW_COMM) ?
0781                       (dev->scsi_host_ptr->max_sectors << 9) :
0782                       65536)) {
0783                     kfree(usg);
0784                     rcode = -EINVAL;
0785                     goto cleanup;
0786                 }
0787 
0788                 p = kmalloc(sg_count[i], GFP_KERNEL);
0789                 if(!p) {
0790                     dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
0791                         sg_count[i], i, usg->count));
0792                     kfree(usg);
0793                     rcode = -ENOMEM;
0794                     goto cleanup;
0795                 }
0796                 sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
0797                 sg_list[i] = p; // save so we can clean up later
0798                 sg_indx = i;
0799 
0800                 if (flags & SRB_DataOut) {
0801                     if (copy_from_user(p, sg_user[i],
0802                         sg_count[i])) {
0803                         kfree (usg);
0804                         dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
0805                         rcode = -EFAULT;
0806                         goto cleanup;
0807                     }
0808                 }
0809                 addr = dma_map_single(&dev->pdev->dev, p,
0810                               sg_count[i], data_dir);
0811 
0812                 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
0813                 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
0814                 byte_count += sg_count[i];
0815                 psg->sg[i].count = cpu_to_le32(sg_count[i]);
0816             }
0817             kfree (usg);
0818         }
0819         srbcmd->count = cpu_to_le32(byte_count);
0820         if (user_srbcmd->sg.count)
0821             psg->count = cpu_to_le32(sg_indx+1);
0822         else
0823             psg->count = 0;
0824         status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
0825     } else {
0826         struct user_sgmap* upsg = &user_srbcmd->sg;
0827         struct sgmap* psg = &srbcmd->sg;
0828 
0829         if (actual_fibsize64 == fibsize) {
0830             struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
0831             for (i = 0; i < upsg->count; i++) {
0832                 uintptr_t addr;
0833                 void* p;
0834 
0835                 sg_count[i] = usg->sg[i].count;
0836                 if (sg_count[i] >
0837                     ((dev->adapter_info.options &
0838                      AAC_OPT_NEW_COMM) ?
0839                       (dev->scsi_host_ptr->max_sectors << 9) :
0840                       65536)) {
0841                     rcode = -EINVAL;
0842                     goto cleanup;
0843                 }
0844                 p = kmalloc(sg_count[i], GFP_KERNEL);
0845                 if (!p) {
0846                     dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
0847                         sg_count[i], i, usg->count));
0848                     rcode = -ENOMEM;
0849                     goto cleanup;
0850                 }
0851                 addr = (u64)usg->sg[i].addr[0];
0852                 addr += ((u64)usg->sg[i].addr[1]) << 32;
0853                 sg_user[i] = (void __user *)addr;
0854                 sg_list[i] = p; // save so we can clean up later
0855                 sg_indx = i;
0856 
0857                 if (flags & SRB_DataOut) {
0858                     if (copy_from_user(p, sg_user[i],
0859                         sg_count[i])){
0860                         dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
0861                         rcode = -EFAULT;
0862                         goto cleanup;
0863                     }
0864                 }
0865                 addr = dma_map_single(&dev->pdev->dev, p,
0866                               usg->sg[i].count,
0867                               data_dir);
0868 
0869                 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
0870                 byte_count += usg->sg[i].count;
0871                 psg->sg[i].count = cpu_to_le32(sg_count[i]);
0872             }
0873         } else {
0874             for (i = 0; i < upsg->count; i++) {
0875                 dma_addr_t addr;
0876                 void* p;
0877 
0878                 sg_count[i] = upsg->sg[i].count;
0879                 if (sg_count[i] >
0880                     ((dev->adapter_info.options &
0881                      AAC_OPT_NEW_COMM) ?
0882                       (dev->scsi_host_ptr->max_sectors << 9) :
0883                       65536)) {
0884                     rcode = -EINVAL;
0885                     goto cleanup;
0886                 }
0887                 p = kmalloc(sg_count[i], GFP_KERNEL);
0888                 if (!p) {
0889                     dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
0890                       sg_count[i], i, upsg->count));
0891                     rcode = -ENOMEM;
0892                     goto cleanup;
0893                 }
0894                 sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
0895                 sg_list[i] = p; // save so we can clean up later
0896                 sg_indx = i;
0897 
0898                 if (flags & SRB_DataOut) {
0899                     if (copy_from_user(p, sg_user[i],
0900                         sg_count[i])) {
0901                         dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
0902                         rcode = -EFAULT;
0903                         goto cleanup;
0904                     }
0905                 }
0906                 addr = dma_map_single(&dev->pdev->dev, p,
0907                               sg_count[i], data_dir);
0908 
0909                 psg->sg[i].addr = cpu_to_le32(addr);
0910                 byte_count += sg_count[i];
0911                 psg->sg[i].count = cpu_to_le32(sg_count[i]);
0912             }
0913         }
0914         srbcmd->count = cpu_to_le32(byte_count);
0915         if (user_srbcmd->sg.count)
0916             psg->count = cpu_to_le32(sg_indx+1);
0917         else
0918             psg->count = 0;
0919         status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
0920     }
0921 
0922     if (status == -ERESTARTSYS) {
0923         rcode = -ERESTARTSYS;
0924         goto cleanup;
0925     }
0926 
0927     if (status != 0) {
0928         dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
0929         rcode = -ENXIO;
0930         goto cleanup;
0931     }
0932 
0933     if (flags & SRB_DataIn) {
0934         for(i = 0 ; i <= sg_indx; i++){
0935             if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
0936                 dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
0937                 rcode = -EFAULT;
0938                 goto cleanup;
0939 
0940             }
0941         }
0942     }
0943 
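         /* The reply area follows the SRB in the user's buffer. */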
0944     user_reply = arg + fibsize;
0945     if (is_native_device) {
0946         struct aac_hba_resp *err =
0947             &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
0948         struct aac_srb_reply reply;
0949 
0950         memset(&reply, 0, sizeof(reply));
0951         reply.status = ST_OK;
0952         if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
0953             /* fast response */
0954             reply.srb_status = SRB_STATUS_SUCCESS;
0955             reply.scsi_status = 0;
0956             reply.data_xfer_length = byte_count;
0957             reply.sense_data_size = 0;
0958             memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
0959         } else {
0960             reply.srb_status = err->service_response;
0961             reply.scsi_status = err->status;
0962             reply.data_xfer_length = byte_count -
0963                 le32_to_cpu(err->residual_count);
0964             reply.sense_data_size = err->sense_response_data_len;
0965             memcpy(reply.sense_data, err->sense_response_buf,
0966                 AAC_SENSE_BUFFERSIZE);
0967         }
0968         if (copy_to_user(user_reply, &reply,
0969             sizeof(struct aac_srb_reply))) {
0970             dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
0971             rcode = -EFAULT;
0972             goto cleanup;
0973         }
0974     } else {
0975         struct aac_srb_reply *reply;
0976 
0977         reply = (struct aac_srb_reply *) fib_data(srbfib);
0978         if (copy_to_user(user_reply, reply,
0979             sizeof(struct aac_srb_reply))) {
0980             dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
0981             rcode = -EFAULT;
0982             goto cleanup;
0983         }
0984     }
0985 
0986 cleanup:
0987     kfree(user_srbcmd);
0988     if (rcode != -ERESTARTSYS) {
0989         for (i = 0; i <= sg_indx; i++)
0990             kfree(sg_list[i]);
0991         aac_fib_complete(srbfib);
0992         aac_fib_free(srbfib);
0993     }
0994 
0995     return rcode;
0996 }
0997 
0998 struct aac_pci_info {
0999     u32 bus;
1000     u32 slot;
1001 };
1002 
1003 
1004 static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
1005 {
1006     struct aac_pci_info pci_info;
1007 
1008     pci_info.bus = dev->pdev->bus->number;
1009     pci_info.slot = PCI_SLOT(dev->pdev->devfn);
1010 
1011     if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
1012         dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
1013         return -EFAULT;
1014     }
1015     return 0;
1016 }
1017 
1018 static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
1019 {
1020     struct aac_hba_info hbainfo;
1021 
1022     memset(&hbainfo, 0, sizeof(hbainfo));
1023     hbainfo.adapter_number      = (u8) dev->id;
1024     hbainfo.system_io_bus_number    = dev->pdev->bus->number;
1025     hbainfo.device_number       = (dev->pdev->devfn >> 3);
1026     hbainfo.function_number     = (dev->pdev->devfn & 0x0007);
1027 
1028     hbainfo.vendor_id       = dev->pdev->vendor;
1029     hbainfo.device_id       = dev->pdev->device;
1030     hbainfo.sub_vendor_id       = dev->pdev->subsystem_vendor;
1031     hbainfo.sub_system_id       = dev->pdev->subsystem_device;
1032 
1033     if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
1034         dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
1035         return -EFAULT;
1036     }
1037 
1038     return 0;
1039 }
1040 
1041 struct aac_reset_iop {
1042     u8  reset_type;
1043 };
1044 
1045 static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
1046 {
1047     struct aac_reset_iop reset;
1048     int retval;
1049 
1050     if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
1051         return -EFAULT;
1052 
1053     dev->adapter_shutdown = 1;
1054 
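         /* Do not hold the ioctl mutex across a potentially lengthy adapter reset. */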
1055     mutex_unlock(&dev->ioctl_mutex);
1056     retval = aac_reset_adapter(dev, 0, reset.reset_type);
1057     mutex_lock(&dev->ioctl_mutex);
1058 
1059     return retval;
1060 }
1061 
1062 int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
1063 {
1064     int status;
1065 
1066     mutex_lock(&dev->ioctl_mutex);
1067 
1068     if (dev->adapter_shutdown) {
1069         status = -EACCES;
1070         goto cleanup;
1071     }
1072 
1073     /*
1074      *  HBA gets first crack
1075      */
1076 
1077     status = aac_dev_ioctl(dev, cmd, arg);
1078     if (status != -ENOTTY)
1079         goto cleanup;
1080 
1081     switch (cmd) {
1082     case FSACTL_MINIPORT_REV_CHECK:
1083         status = check_revision(dev, arg);
1084         break;
1085     case FSACTL_SEND_LARGE_FIB:
1086     case FSACTL_SENDFIB:
1087         status = ioctl_send_fib(dev, arg);
1088         break;
1089     case FSACTL_OPEN_GET_ADAPTER_FIB:
1090         status = open_getadapter_fib(dev, arg);
1091         break;
1092     case FSACTL_GET_NEXT_ADAPTER_FIB:
1093         status = next_getadapter_fib(dev, arg);
1094         break;
1095     case FSACTL_CLOSE_GET_ADAPTER_FIB:
1096         status = close_getadapter_fib(dev, arg);
1097         break;
1098     case FSACTL_SEND_RAW_SRB:
1099         status = aac_send_raw_srb(dev,arg);
1100         break;
1101     case FSACTL_GET_PCI_INFO:
1102         status = aac_get_pci_info(dev,arg);
1103         break;
1104     case FSACTL_GET_HBA_INFO:
1105         status = aac_get_hba_info(dev, arg);
1106         break;
1107     case FSACTL_RESET_IOP:
1108         status = aac_send_reset_adapter(dev, arg);
1109         break;
1110 
1111     default:
1112         status = -ENOTTY;
1113         break;
1114     }
1115 
1116 cleanup:
1117     mutex_unlock(&dev->ioctl_mutex);
1118 
1119     return status;
1120 }
1121