/*
 *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
                                       struct esas2r_request *,
                                       struct esas2r_sg_context *,
                                       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
                                             struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
    struct esas2r_adapter *a;
    void *ioctl;
    u32 length;
    u32 control_code;
    u32 offset;
    BUFFERED_IOCTL_CALLBACK callback;
    void *context;
    BUFFERED_IOCTL_DONE_CALLBACK done_callback;
    void *done_context;
};

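/*
 * Completion callback for FM API requests.  Flag the request done and wake
 * the thread waiting in do_fm_api().
 */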
static void complete_fm_api_req(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
    a->fm_api_command_done = 1;
    wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
    struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
    int offset = sgc->cur_offset - a->save_offset;

    (*addr) = a->firmware.phys + offset;
    return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
    struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
    int offset = sgc->cur_offset - a->save_offset;

    (*addr) = a->firmware.header_buff_phys + offset;
    return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
    struct esas2r_request *rq;

    if (mutex_lock_interruptible(&a->fm_api_mutex)) {
        fi->status = FI_STAT_BUSY;
        return;
    }

    rq = esas2r_alloc_request(a);
    if (rq == NULL) {
        fi->status = FI_STAT_BUSY;
        goto free_sem;
    }

    if (fi == &a->firmware.header) {
        a->firmware.header_buff =
            dma_alloc_coherent(&a->pcid->dev,
                               sizeof(struct esas2r_flash_img),
                               (dma_addr_t *)&a->firmware.header_buff_phys,
                               GFP_KERNEL);

        if (a->firmware.header_buff == NULL) {
            esas2r_debug("failed to allocate header buffer!");
            fi->status = FI_STAT_BUSY;
            goto free_req;
        }

        memcpy(a->firmware.header_buff, fi,
               sizeof(struct esas2r_flash_img));
        a->save_offset = a->firmware.header_buff;
        a->fm_api_sgc.get_phys_addr =
            (PGETPHYSADDR)get_physaddr_fm_api_header;
    } else {
        a->save_offset = (u8 *)fi;
        a->fm_api_sgc.get_phys_addr =
            (PGETPHYSADDR)get_physaddr_fm_api;
    }

    rq->comp_cb = complete_fm_api_req;
    a->fm_api_command_done = 0;
    a->fm_api_sgc.cur_offset = a->save_offset;

    if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
                       &a->fm_api_sgc))
        goto all_done;

    /* Now wait around for it to complete. */
    while (!a->fm_api_command_done)
        wait_event_interruptible(a->fm_api_waiter,
                                 a->fm_api_command_done);
all_done:
    if (fi == &a->firmware.header) {
        memcpy(fi, a->firmware.header_buff,
               sizeof(struct esas2r_flash_img));

        dma_free_coherent(&a->pcid->dev,
                          sizeof(struct esas2r_flash_img),
                          a->firmware.header_buff,
                          (dma_addr_t)a->firmware.header_buff_phys);
    }
free_req:
    esas2r_free_request(a, rq);
free_sem:
    mutex_unlock(&a->fm_api_mutex);
}

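/*
 * Completion callback for NVRAM requests.  Wakes the thread waiting in
 * esas2r_write_params().
 */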
static void complete_nvr_req(struct esas2r_adapter *a,
                             struct esas2r_request *rq)
{
    a->nvram_command_done = 1;
    wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
                                       u64 *addr)
{
    int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

    (*addr) = esas2r_buffered_ioctl_addr + offset;
    return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
                                        struct esas2r_request *rq)
{
    a->buffered_ioctl_done = 1;
    wake_up_interruptible(&a->buffered_ioctl_waiter);
}

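/*
 * Copy the caller's ioctl data into the global DMA buffer, hand it to the
 * supplied callback to build and start the firmware request, wait for
 * completion, then copy the results back out.  Serialized by
 * buffered_ioctl_semaphore since the buffer is shared by all adapters.
 */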
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
    struct esas2r_adapter *a = bi->a;
    struct esas2r_request *rq;
    struct esas2r_sg_context sgc;
    u8 result = IOCTL_SUCCESS;

    if (down_interruptible(&buffered_ioctl_semaphore))
        return IOCTL_OUT_OF_RESOURCES;

    /* allocate a buffer or use the existing buffer. */
    if (esas2r_buffered_ioctl) {
        if (esas2r_buffered_ioctl_size < bi->length) {
            /* free the too-small buffer and get a new one */
            dma_free_coherent(&a->pcid->dev,
                              (size_t)esas2r_buffered_ioctl_size,
                              esas2r_buffered_ioctl,
                              esas2r_buffered_ioctl_addr);

            goto allocate_buffer;
        }
    } else {
allocate_buffer:
        esas2r_buffered_ioctl_size = bi->length;
        esas2r_buffered_ioctl_pcid = a->pcid;
        esas2r_buffered_ioctl =
            dma_alloc_coherent(&a->pcid->dev,
                               (size_t)esas2r_buffered_ioctl_size,
                               &esas2r_buffered_ioctl_addr,
                               GFP_KERNEL);
    }

    if (!esas2r_buffered_ioctl) {
        esas2r_log(ESAS2R_LOG_CRIT,
                   "could not allocate %d bytes of consistent memory "
                   "for a buffered ioctl!",
                   bi->length);

        esas2r_debug("buffered ioctl alloc failure");
        result = IOCTL_OUT_OF_RESOURCES;
        goto exit_cleanly;
    }

    memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

    rq = esas2r_alloc_request(a);
    if (rq == NULL) {
        esas2r_log(ESAS2R_LOG_CRIT,
                   "could not allocate an internal request");

        result = IOCTL_OUT_OF_RESOURCES;
        esas2r_debug("buffered ioctl - no requests");
        goto exit_cleanly;
    }

    a->buffered_ioctl_done = 0;
    rq->comp_cb = complete_buffered_ioctl_req;
    sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
    sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
    sgc.length = esas2r_buffered_ioctl_size;

    if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
        /* completed immediately, no need to wait */
        a->buffered_ioctl_done = 0;
        goto free_and_exit_cleanly;
    }

    /* now wait around for it to complete. */
    while (!a->buffered_ioctl_done)
        wait_event_interruptible(a->buffered_ioctl_waiter,
                                 a->buffered_ioctl_done);

free_and_exit_cleanly:
    if (result == IOCTL_SUCCESS && bi->done_callback)
        (*bi->done_callback)(a, rq, bi->done_context);

    esas2r_free_request(a, rq);

exit_cleanly:
    if (result == IOCTL_SUCCESS)
        memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

    up(&buffered_ioctl_semaphore);
    return result;
}

/* SMP ioctl support */
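/*
 * Buffered ioctl callback: build and start a VDA SMP ioctl request around
 * the global buffer.  Returns false if the SG list could not be built.
 */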
static int smp_ioctl_callback(struct esas2r_adapter *a,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc, void *context)
{
    struct atto_ioctl_smp *si =
        (struct atto_ioctl_smp *)esas2r_buffered_ioctl;

    esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
    esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

    if (!esas2r_build_sg_list(a, rq, sgc)) {
        si->status = ATTO_STS_OUT_OF_RSRC;
        return false;
    }

    esas2r_start_request(a, rq);
    return true;
}

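/* Handle EXPRESS_IOCTL_SMP by routing it through the buffered ioctl path. */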
static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
    struct esas2r_buffered_ioctl bi;

    memset(&bi, 0, sizeof(bi));

    bi.a = a;
    bi.ioctl = si;
    bi.length = sizeof(struct atto_ioctl_smp)
                + le32_to_cpu(si->req_length)
                + le32_to_cpu(si->rsp_length);
    bi.offset = 0;
    bi.callback = smp_ioctl_callback;
    return handle_buffered_ioctl(&bi);
}


/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
    rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
    rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

    /* Now call the original completion callback. */
    (*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
                              union atto_ioctl_csmi *ci,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc,
                              u32 ctrl_code,
                              u16 target_id)
{
    struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

    if (test_bit(AF_DEGRADED_MODE, &a->flags))
        return false;

    esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
    esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
    ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
    ioctl->csmi.target_id = cpu_to_le16(target_id);
    ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

    /*
     * Always usurp the completion callback since the interrupt callback
     * mechanism may be used.
     */
    rq->aux_req_cx = ci;
    rq->aux_req_cb = rq->comp_cb;
    rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

    if (!esas2r_build_sg_list(a, rq, sgc))
        return false;

    esas2r_start_request(a, rq);
    return true;
}

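/*
 * Validate that only byte 1 (the single-level LUN number) of the SCSI LUN
 * is used; every other byte must be zero.
 */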
static bool check_lun(struct scsi_lun lun)
{
    bool result;

    result = ((lun.scsi_lun[7] == 0) &&
              (lun.scsi_lun[6] == 0) &&
              (lun.scsi_lun[5] == 0) &&
              (lun.scsi_lun[4] == 0) &&
              (lun.scsi_lun[3] == 0) &&
              (lun.scsi_lun[2] == 0) &&
              /* Byte 1 is intentionally skipped */
              (lun.scsi_lun[0] == 0));

    return result;
}

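/*
 * Buffered ioctl callback for CSMI requests.  Services what it can locally
 * and tunnels the rest to the firmware; returns true only when a request
 * was started and the caller must wait for completion.
 */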
static int csmi_ioctl_callback(struct esas2r_adapter *a,
                               struct esas2r_request *rq,
                               struct esas2r_sg_context *sgc, void *context)
{
    struct atto_csmi *ci = (struct atto_csmi *)context;
    union atto_ioctl_csmi *ioctl_csmi =
        (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
    u8 path = 0;
    u8 tid = 0;
    u8 lun = 0;
    u32 sts = CSMI_STS_SUCCESS;
    struct esas2r_target *t;
    unsigned long flags;

    if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
        struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

        path = gda->path_id;
        tid = gda->target_id;
        lun = gda->lun;
    } else if (ci->control_code == CSMI_CC_TASK_MGT) {
        struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

        path = tm->path_id;
        tid = tm->target_id;
        lun = tm->lun;
    }

    if (path > 0) {
        rq->func_rsp.ioctl_rsp.csmi.csmi_status =
            cpu_to_le32(CSMI_STS_INV_PARAM);
        return false;
    }

    rq->target_id = tid;
    rq->vrq->scsi.flags |= cpu_to_le32(lun);

    switch (ci->control_code) {
    case CSMI_CC_GET_DRVR_INFO:
    {
        struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

        strcpy(gdi->description, esas2r_get_model_name(a));
        gdi->csmi_major_rev = CSMI_MAJOR_REV;
        gdi->csmi_minor_rev = CSMI_MINOR_REV;
        break;
    }

    case CSMI_CC_GET_CNTLR_CFG:
    {
        struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

        gcc->base_io_addr = 0;
        pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
                              &gcc->base_memaddr_lo);
        pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
                              &gcc->base_memaddr_hi);
        gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
                                  a->pcid->subsystem_vendor);
        gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
        gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
        gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
        gcc->pci_addr.bus_num = a->pcid->bus->number;
        gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
        gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

        memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

        gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
        gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
        gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
        gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
        gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
        gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
        gcc->bios_build_rev = LOWORD(a->flash_ver);

        if (test_bit(AF2_THUNDERLINK, &a->flags2))
            gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
                               | CSMI_CNTLRF_SATA_HBA;
        else
            gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
                               | CSMI_CNTLRF_SATA_RAID;

        gcc->rrom_major_rev = 0;
        gcc->rrom_minor_rev = 0;
        gcc->rrom_build_rev = 0;
        gcc->rrom_release_rev = 0;
        gcc->rrom_biosmajor_rev = 0;
        gcc->rrom_biosminor_rev = 0;
        gcc->rrom_biosbuild_rev = 0;
        gcc->rrom_biosrelease_rev = 0;
        break;
    }

    case CSMI_CC_GET_CNTLR_STS:
    {
        struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

        if (test_bit(AF_DEGRADED_MODE, &a->flags))
            gcs->status = CSMI_CNTLR_STS_FAILED;
        else
            gcs->status = CSMI_CNTLR_STS_GOOD;

        gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
        break;
    }

    case CSMI_CC_FW_DOWNLOAD:
    case CSMI_CC_GET_RAID_INFO:
    case CSMI_CC_GET_RAID_CFG:

        sts = CSMI_STS_BAD_CTRL_CODE;
        break;

    case CSMI_CC_SMP_PASSTHRU:
    case CSMI_CC_SSP_PASSTHRU:
    case CSMI_CC_STP_PASSTHRU:
    case CSMI_CC_GET_PHY_INFO:
    case CSMI_CC_SET_PHY_INFO:
    case CSMI_CC_GET_LINK_ERRORS:
    case CSMI_CC_GET_SATA_SIG:
    case CSMI_CC_GET_CONN_INFO:
    case CSMI_CC_PHY_CTRL:

        if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
                               ci->control_code,
                               ESAS2R_TARG_ID_INV)) {
            sts = CSMI_STS_FAILED;
            break;
        }

        return true;

    case CSMI_CC_GET_SCSI_ADDR:
    {
        struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
        struct scsi_lun lun;

        memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

        if (!check_lun(lun)) {
            sts = CSMI_STS_NO_SCSI_ADDR;
            break;
        }

        /* make sure the device is present */
        spin_lock_irqsave(&a->mem_lock, flags);
        t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
        spin_unlock_irqrestore(&a->mem_lock, flags);

        if (t == NULL) {
            sts = CSMI_STS_NO_SCSI_ADDR;
            break;
        }

        gsa->host_index = 0xFF;
        gsa->lun = gsa->sas_lun[1];
        rq->target_id = esas2r_targ_get_id(t, a);
        break;
    }

    case CSMI_CC_GET_DEV_ADDR:
    {
        struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

        /* make sure the target is present */
        t = a->targetdb + rq->target_id;

        if (t >= a->targetdb_end
            || t->target_state != TS_PRESENT
            || t->sas_addr == 0) {
            sts = CSMI_STS_NO_DEV_ADDR;
            break;
        }

        /* fill in the result */
        *(u64 *)gda->sas_addr = t->sas_addr;
        memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
        gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
        break;
    }

    case CSMI_CC_TASK_MGT:

        /* make sure the target is present */
        t = a->targetdb + rq->target_id;

        if (t >= a->targetdb_end
            || t->target_state != TS_PRESENT
            || !(t->flags & TF_PASS_THRU)) {
            sts = CSMI_STS_NO_DEV_ADDR;
            break;
        }

        if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
                               ci->control_code,
                               t->phys_targ_id)) {
            sts = CSMI_STS_FAILED;
            break;
        }

        return true;

    default:

        sts = CSMI_STS_BAD_CTRL_CODE;
        break;
    }

    rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

    return false;
}

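/*
 * Done callback for CSMI requests: fill in driver-supplied fields and copy
 * the final CSMI status back to the caller's buffer.
 */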
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
                                     struct esas2r_request *rq, void *context)
{
    struct atto_csmi *ci = (struct atto_csmi *)context;
    union atto_ioctl_csmi *ioctl_csmi =
        (union atto_ioctl_csmi *)esas2r_buffered_ioctl;

    switch (ci->control_code) {
    case CSMI_CC_GET_DRVR_INFO:
    {
        struct atto_csmi_get_driver_info *gdi =
            &ioctl_csmi->drvr_info;

        strcpy(gdi->name, ESAS2R_VERSION_STR);

        gdi->major_rev = ESAS2R_MAJOR_REV;
        gdi->minor_rev = ESAS2R_MINOR_REV;
        gdi->build_rev = 0;
        gdi->release_rev = 0;
        break;
    }

    case CSMI_CC_GET_SCSI_ADDR:
    {
        struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

        if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
            CSMI_STS_SUCCESS) {
            gsa->target_id = rq->target_id;
            gsa->path_id = 0;
        }

        break;
    }
    }

    ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

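/* Handle EXPRESS_CSMI by routing it through the buffered ioctl path. */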
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
    struct esas2r_buffered_ioctl bi;

    memset(&bi, 0, sizeof(bi));

    bi.a = a;
    bi.ioctl = &ci->data;
    bi.length = sizeof(union atto_ioctl_csmi);
    bi.offset = 0;
    bi.callback = csmi_ioctl_callback;
    bi.context = ci;
    bi.done_callback = csmi_ioctl_done_callback;
    bi.done_context = ci;

    return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
                             struct atto_ioctl *hi,
                             struct esas2r_request *rq,
                             struct esas2r_sg_context *sgc)
{
    esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

    esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

    if (!esas2r_build_sg_list(a, rq, sgc)) {
        hi->status = ATTO_STS_OUT_OF_RSRC;

        return false;
    }

    esas2r_start_request(a, rq);

    return true;
}

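/*
 * Completion callback for tunneled SCSI pass-through requests.  Translate
 * the request status into an ATTO_SPT_RS_* code, fill in the response
 * fields, then invoke the original completion callback.
 */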
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
                                  struct esas2r_request *rq)
{
    struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
    struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
    u8 sts = ATTO_SPT_RS_FAILED;

    spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
    spt->sense_length = rq->sense_len;
    spt->residual_length =
        le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

    switch (rq->req_stat) {
    case RS_SUCCESS:
    case RS_SCSI_ERROR:
        sts = ATTO_SPT_RS_SUCCESS;
        break;
    case RS_UNDERRUN:
        sts = ATTO_SPT_RS_UNDERRUN;
        break;
    case RS_OVERRUN:
        sts = ATTO_SPT_RS_OVERRUN;
        break;
    case RS_SEL:
    case RS_SEL2:
        sts = ATTO_SPT_RS_NO_DEVICE;
        break;
    case RS_NO_LUN:
        sts = ATTO_SPT_RS_NO_LUN;
        break;
    case RS_TIMEOUT:
        sts = ATTO_SPT_RS_TIMEOUT;
        break;
    case RS_DEGRADED:
        sts = ATTO_SPT_RS_DEGRADED;
        break;
    case RS_BUSY:
        sts = ATTO_SPT_RS_BUSY;
        break;
    case RS_ABORTED:
        sts = ATTO_SPT_RS_ABORTED;
        break;
    case RS_RESET:
        sts = ATTO_SPT_RS_BUS_RESET;
        break;
    }

    spt->req_status = sts;

    /* Update the target ID to the next one present. */
    spt->target_id =
        esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

    /* Done, call the completion callback. */
    (*rq->aux_req_cb)(a, rq);
}

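/*
 * Buffered ioctl callback for ATTO HBA requests.  Returns true only when a
 * firmware request was started and the caller must wait for completion;
 * otherwise the request was serviced locally.
 */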
static int hba_ioctl_callback(struct esas2r_adapter *a,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc,
                              void *context)
{
    struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

    hi->status = ATTO_STS_SUCCESS;

    switch (hi->function) {
    case ATTO_FUNC_GET_ADAP_INFO:
    {
        u8 *class_code = (u8 *)&a->pcid->class;

        struct atto_hba_get_adapter_info *gai =
            &hi->data.get_adap_info;

        if (hi->flags & HBAF_TUNNEL) {
            hi->status = ATTO_STS_UNSUPPORTED;
            break;
        }

        if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_GET_ADAP_INFO0;
            break;
        }

        memset(gai, 0, sizeof(*gai));

        gai->pci.vendor_id = a->pcid->vendor;
        gai->pci.device_id = a->pcid->device;
        gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
        gai->pci.ss_device_id = a->pcid->subsystem_device;
        gai->pci.class_code[0] = class_code[0];
        gai->pci.class_code[1] = class_code[1];
        gai->pci.class_code[2] = class_code[2];
        gai->pci.rev_id = a->pcid->revision;
        gai->pci.bus_num = a->pcid->bus->number;
        gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
        gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

        if (pci_is_pcie(a->pcid)) {
            u16 stat;
            u32 caps;

            pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
                                      &stat);
            pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
                                       &caps);

            gai->pci.link_speed_curr =
                (u8)(stat & PCI_EXP_LNKSTA_CLS);
            gai->pci.link_speed_max =
                (u8)(caps & PCI_EXP_LNKCAP_SLS);
            gai->pci.link_width_curr =
                (u8)((stat & PCI_EXP_LNKSTA_NLW)
                     >> PCI_EXP_LNKSTA_NLW_SHIFT);
            gai->pci.link_width_max =
                (u8)((caps & PCI_EXP_LNKCAP_MLW)
                     >> 4);
        }

        gai->pci.msi_vector_cnt = 1;

        if (a->pcid->msix_enabled)
            gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
        else if (a->pcid->msi_enabled)
            gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
        else
            gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

        gai->adap_type = ATTO_GAI_AT_ESASRAID2;

        if (test_bit(AF2_THUNDERLINK, &a->flags2))
            gai->adap_type = ATTO_GAI_AT_TLSASHBA;

        if (test_bit(AF_DEGRADED_MODE, &a->flags))
            gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

        gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
                           ATTO_GAI_AF_DEVADDR_SUPP;

        if (a->pcid->subsystem_device == ATTO_ESAS_R60F
            || a->pcid->subsystem_device == ATTO_ESAS_R608
            || a->pcid->subsystem_device == ATTO_ESAS_R644
            || a->pcid->subsystem_device == ATTO_TSSC_3808E)
            gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

        gai->num_ports = ESAS2R_NUM_PHYS;
        gai->num_phys = ESAS2R_NUM_PHYS;

        strcpy(gai->firmware_rev, a->fw_rev);
        strcpy(gai->flash_rev, a->flash_rev);
        strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
        strcpy(gai->model_name, esas2r_get_model_name(a));

        gai->num_targets = ESAS2R_MAX_TARGETS;

        gai->num_busses = 1;
        gai->num_targsper_bus = gai->num_targets;
        gai->num_lunsper_targ = 256;

        if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
            || a->pcid->subsystem_device == ATTO_ESAS_R60F)
            gai->num_connectors = 4;
        else
            gai->num_connectors = 2;

        gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

        gai->num_targets_backend = a->num_targets_backend;

        gai->tunnel_flags = a->ioctl_tunnel
                            & (ATTO_GAI_TF_MEM_RW
                               | ATTO_GAI_TF_TRACE
                               | ATTO_GAI_TF_SCSI_PASS_THRU
                               | ATTO_GAI_TF_GET_DEV_ADDR
                               | ATTO_GAI_TF_PHY_CTRL
                               | ATTO_GAI_TF_CONN_CTRL
                               | ATTO_GAI_TF_GET_DEV_INFO);
        break;
    }

    case ATTO_FUNC_GET_ADAP_ADDR:
    {
        struct atto_hba_get_adapter_address *gaa =
            &hi->data.get_adap_addr;

        if (hi->flags & HBAF_TUNNEL) {
            hi->status = ATTO_STS_UNSUPPORTED;
            break;
        }

        if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_GET_ADAP_ADDR0;
        } else if (gaa->addr_type == ATTO_GAA_AT_PORT
                   || gaa->addr_type == ATTO_GAA_AT_NODE) {
            if (gaa->addr_type == ATTO_GAA_AT_PORT
                && gaa->port_id >= ESAS2R_NUM_PHYS) {
                hi->status = ATTO_STS_NOT_APPL;
            } else {
                memcpy((u64 *)gaa->address,
                       &a->nvram->sas_addr[0], sizeof(u64));
                gaa->addr_len = sizeof(u64);
            }
        } else {
            hi->status = ATTO_STS_INV_PARAM;
        }

        break;
    }

    case ATTO_FUNC_MEM_RW:
    {
        if (hi->flags & HBAF_TUNNEL) {
            if (hba_ioctl_tunnel(a, hi, rq, sgc))
                return true;

            break;
        }

        hi->status = ATTO_STS_UNSUPPORTED;

        break;
    }

    case ATTO_FUNC_TRACE:
    {
        struct atto_hba_trace *trc = &hi->data.trace;

        if (hi->flags & HBAF_TUNNEL) {
            if (hba_ioctl_tunnel(a, hi, rq, sgc))
                return true;

            break;
        }

        if (hi->version > ATTO_VER_TRACE1) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_TRACE1;
            break;
        }

        if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
            && hi->version >= ATTO_VER_TRACE1) {
            if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
                u32 len = hi->data_length;
                u32 offset = trc->current_offset;
                u32 total_len = ESAS2R_FWCOREDUMP_SZ;

                /* Size is zero if a core dump isn't present */
                if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                    total_len = 0;

                if (len > total_len)
                    len = total_len;

                if (offset >= total_len
                    || offset + len > total_len
                    || len == 0) {
                    hi->status = ATTO_STS_INV_PARAM;
                    break;
                }

                memcpy(trc + 1,
                       a->fw_coredump_buff + offset,
                       len);

                hi->data_length = len;
            } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
                memset(a->fw_coredump_buff, 0,
                       ESAS2R_FWCOREDUMP_SZ);

                clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
            } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
                hi->status = ATTO_STS_UNSUPPORTED;
                break;
            }

            /* Always return all the info we can. */
            trc->trace_mask = 0;
            trc->current_offset = 0;
            trc->total_length = ESAS2R_FWCOREDUMP_SZ;

            /* Return zero length buffer if core dump not present */
            if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                trc->total_length = 0;
        } else {
            hi->status = ATTO_STS_UNSUPPORTED;
        }

        break;
    }

    case ATTO_FUNC_SCSI_PASS_THRU:
    {
        struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
        struct scsi_lun lun;

        memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

        if (hi->flags & HBAF_TUNNEL) {
            if (hba_ioctl_tunnel(a, hi, rq, sgc))
                return true;

            break;
        }

        if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_SCSI_PASS_THRU0;
            break;
        }

        if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
            hi->status = ATTO_STS_INV_PARAM;
            break;
        }

        esas2r_sgc_init(sgc, a, rq, NULL);

        sgc->length = hi->data_length;
        sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
                           + sizeof(struct atto_hba_scsi_pass_thru);

        /* Finish request initialization */
        rq->target_id = (u16)spt->target_id;
        rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
        memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
        rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
        rq->sense_len = spt->sense_length;
        rq->sense_buf = (u8 *)spt->sense_data;
        /* NOTE: we ignore spt->timeout */

        /*
         * always usurp the completion callback since the interrupt
         * callback mechanism may be used.
         */
        rq->aux_req_cx = hi;
        rq->aux_req_cb = rq->comp_cb;
        rq->comp_cb = scsi_passthru_comp_cb;

        if (spt->flags & ATTO_SPTF_DATA_IN) {
            rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
        } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
            rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
        } else {
            if (sgc->length) {
                hi->status = ATTO_STS_INV_PARAM;
                break;
            }
        }

        if (spt->flags & ATTO_SPTF_ORDERED_Q)
            rq->vrq->scsi.flags |=
                cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
        else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
            rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

        if (!esas2r_build_sg_list(a, rq, sgc)) {
            hi->status = ATTO_STS_OUT_OF_RSRC;
            break;
        }

        esas2r_start_request(a, rq);

        return true;
    }

    case ATTO_FUNC_GET_DEV_ADDR:
    {
        struct atto_hba_get_device_address *gda =
            &hi->data.get_dev_addr;
        struct esas2r_target *t;

        if (hi->flags & HBAF_TUNNEL) {
            if (hba_ioctl_tunnel(a, hi, rq, sgc))
                return true;

            break;
        }

        if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_GET_DEV_ADDR0;
            break;
        }

        if (gda->target_id >= ESAS2R_MAX_TARGETS) {
            hi->status = ATTO_STS_INV_PARAM;
            break;
        }

        t = a->targetdb + (u16)gda->target_id;

        if (t->target_state != TS_PRESENT) {
            hi->status = ATTO_STS_FAILED;
        } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
            if (t->sas_addr == 0) {
                hi->status = ATTO_STS_UNSUPPORTED;
            } else {
                *(u64 *)gda->address = t->sas_addr;

                gda->addr_len = sizeof(u64);
            }
        } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
            hi->status = ATTO_STS_NOT_APPL;
        } else {
            hi->status = ATTO_STS_INV_PARAM;
        }

        /* update the target ID to the next one present. */
        gda->target_id =
            esas2r_targ_db_find_next_present(a,
                                             (u16)gda->target_id);
        break;
    }

    case ATTO_FUNC_PHY_CTRL:
    case ATTO_FUNC_CONN_CTRL:
    {
        if (hba_ioctl_tunnel(a, hi, rq, sgc))
            return true;

        break;
    }

    case ATTO_FUNC_ADAP_CTRL:
    {
        struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

        if (hi->flags & HBAF_TUNNEL) {
            hi->status = ATTO_STS_UNSUPPORTED;
            break;
        }

        if (hi->version > ATTO_VER_ADAP_CTRL0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_ADAP_CTRL0;
            break;
        }

        if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
            esas2r_reset_adapter(a);
        } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
            hi->status = ATTO_STS_UNSUPPORTED;
            break;
        }

        if (test_bit(AF_CHPRST_NEEDED, &a->flags))
            ac->adap_state = ATTO_AC_AS_RST_SCHED;
        else if (test_bit(AF_CHPRST_PENDING, &a->flags))
            ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
        else if (test_bit(AF_DISC_PENDING, &a->flags))
            ac->adap_state = ATTO_AC_AS_RST_DISC;
        else if (test_bit(AF_DISABLED, &a->flags))
            ac->adap_state = ATTO_AC_AS_DISABLED;
        else if (test_bit(AF_DEGRADED_MODE, &a->flags))
            ac->adap_state = ATTO_AC_AS_DEGRADED;
        else
            ac->adap_state = ATTO_AC_AS_OK;

        break;
    }

    case ATTO_FUNC_GET_DEV_INFO:
    {
        struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
        struct esas2r_target *t;

        if (hi->flags & HBAF_TUNNEL) {
            if (hba_ioctl_tunnel(a, hi, rq, sgc))
                return true;

            break;
        }

        if (hi->version > ATTO_VER_GET_DEV_INFO0) {
            hi->status = ATTO_STS_INV_VERSION;
            hi->version = ATTO_VER_GET_DEV_INFO0;
            break;
        }

        if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
            hi->status = ATTO_STS_INV_PARAM;
            break;
        }

        t = a->targetdb + (u16)gdi->target_id;

        /* update the target ID to the next one present. */
        gdi->target_id =
            esas2r_targ_db_find_next_present(a,
                                             (u16)gdi->target_id);

        if (t->target_state != TS_PRESENT) {
            hi->status = ATTO_STS_FAILED;
            break;
        }

        hi->status = ATTO_STS_UNSUPPORTED;
        break;
    }

    default:

        hi->status = ATTO_STS_INV_FUNC;
        break;
    }

    return false;
}

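/*
 * Done callback for ATTO HBA requests: fill in the driver-specific fields
 * that the firmware cannot provide.
 */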
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
                                    struct esas2r_request *rq, void *context)
{
    struct atto_ioctl *ioctl_hba =
        (struct atto_ioctl *)esas2r_buffered_ioctl;

    esas2r_debug("hba_ioctl_done_callback %d", a->index);

    if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
        struct atto_hba_get_adapter_info *gai =
            &ioctl_hba->data.get_adap_info;

        esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

        gai->drvr_rev_major = ESAS2R_MAJOR_REV;
        gai->drvr_rev_minor = ESAS2R_MINOR_REV;

        strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
        strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

        gai->num_busses = 1;
        gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
        gai->num_lunsper_targ = 1;
    }
}

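/* Handle EXPRESS_IOCTL_HBA by routing it through the buffered ioctl path. */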
u8 handle_hba_ioctl(struct esas2r_adapter *a,
                    struct atto_ioctl *ioctl_hba)
{
    struct esas2r_buffered_ioctl bi;

    memset(&bi, 0, sizeof(bi));

    bi.a = a;
    bi.ioctl = ioctl_hba;
    bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
    bi.callback = hba_ioctl_callback;
    bi.context = NULL;
    bi.done_callback = hba_ioctl_done_callback;
    bi.done_context = NULL;
    bi.offset = 0;

    return handle_buffered_ioctl(&bi);
}

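/*
 * Write the given NVRAM parameters to the adapter and wait for the request
 * to complete.  Returns 1 on success, 0 on failure.
 */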
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
                        struct esas2r_sas_nvram *data)
{
    int result = 0;

    a->nvram_command_done = 0;
    rq->comp_cb = complete_nvr_req;

    if (esas2r_nvram_write(a, rq, data)) {
        /* now wait around for it to complete. */
        while (!a->nvram_command_done)
            wait_event_interruptible(a->nvram_waiter,
                                     a->nvram_command_done);

        /* done, check the status. */
        if (rq->req_stat == RS_SUCCESS)
            result = 1;
    }
    return result;
}

/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
{
    struct atto_express_ioctl *ioctl = NULL;
    struct esas2r_adapter *a;
    struct esas2r_request *rq;
    u16 code;
    int err;

    esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

    if ((arg == NULL)
        || (cmd < EXPRESS_IOCTL_MIN)
        || (cmd > EXPRESS_IOCTL_MAX))
        return -ENOTSUPP;

    ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
    if (IS_ERR(ioctl)) {
        esas2r_log(ESAS2R_LOG_WARN,
                   "ioctl_handler memdup_user failed for cmd %u, address %p",
                   cmd, arg);
        return PTR_ERR(ioctl);
    }

    /* verify the signature */

    if (memcmp(ioctl->header.signature,
               EXPRESS_IOCTL_SIGNATURE,
               EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
        esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
        kfree(ioctl);

        return -ENOTSUPP;
    }

    /* assume success */

    ioctl->header.return_code = IOCTL_SUCCESS;
    err = 0;

    /*
     * handle EXPRESS_IOCTL_GET_CHANNELS
     * without paying attention to channel
     */

    if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
        int i = 0, k = 0;

        ioctl->data.chanlist.num_channels = 0;

        while (i < MAX_ADAPTERS) {
            if (esas2r_adapters[i]) {
                ioctl->data.chanlist.num_channels++;
                ioctl->data.chanlist.channel[k] = i;
                k++;
            }
            i++;
        }

        goto ioctl_done;
    }

    /* get the channel */

    if (ioctl->header.channel == 0xFF) {
        a = (struct esas2r_adapter *)hostdata;
    } else {
        if (ioctl->header.channel >= MAX_ADAPTERS ||
            esas2r_adapters[ioctl->header.channel] == NULL) {
            ioctl->header.return_code = IOCTL_BAD_CHANNEL;
            esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
            kfree(ioctl);

            return -ENOTSUPP;
        }
        a = esas2r_adapters[ioctl->header.channel];
    }

    switch (cmd) {
    case EXPRESS_IOCTL_RW_FIRMWARE:

        if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
            err = esas2r_write_fw(a,
                                  (char *)ioctl->data.fwrw.image,
                                  0,
                                  sizeof(struct atto_express_ioctl));

            if (err >= 0) {
                err = esas2r_read_fw(a,
                                     (char *)ioctl->data.fwrw.image,
                                     0,
                                     sizeof(struct atto_express_ioctl));
            }
        } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
            err = esas2r_write_fs(a,
                                  (char *)ioctl->data.fwrw.image,
                                  0,
                                  sizeof(struct atto_express_ioctl));

            if (err >= 0) {
                err = esas2r_read_fs(a,
                                     (char *)ioctl->data.fwrw.image,
                                     0,
                                     sizeof(struct atto_express_ioctl));
            }
        } else {
            ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
        }

        break;

    case EXPRESS_IOCTL_READ_PARAMS:

        memcpy(ioctl->data.prw.data_buffer, a->nvram,
               sizeof(struct esas2r_sas_nvram));
        ioctl->data.prw.code = 1;
        break;

    case EXPRESS_IOCTL_WRITE_PARAMS:

        rq = esas2r_alloc_request(a);
        if (rq == NULL) {
            kfree(ioctl);
            esas2r_log(ESAS2R_LOG_WARN,
                       "could not allocate an internal request");
            return -ENOMEM;
        }

        code = esas2r_write_params(a, rq,
                                   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
        ioctl->data.prw.code = code;

        esas2r_free_request(a, rq);

        break;

    case EXPRESS_IOCTL_DEFAULT_PARAMS:

        esas2r_nvram_get_defaults(a,
                                  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
        ioctl->data.prw.code = 1;
        break;

    case EXPRESS_IOCTL_CHAN_INFO:

        ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
        ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
        ioctl->data.chaninfo.IRQ = a->pcid->irq;
        ioctl->data.chaninfo.device_id = a->pcid->device;
        ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
        ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
        ioctl->data.chaninfo.revision_id = a->pcid->revision;
        ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
        ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
        ioctl->data.chaninfo.core_rev = 0;
        ioctl->data.chaninfo.host_no = a->host->host_no;
        ioctl->data.chaninfo.hbaapi_rev = 0;
        break;

    case EXPRESS_IOCTL_SMP:
        ioctl->header.return_code =
            handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
        break;

    case EXPRESS_CSMI:
        ioctl->header.return_code =
            handle_csmi_ioctl(a, &ioctl->data.csmi);
        break;

    case EXPRESS_IOCTL_HBA:
        ioctl->header.return_code =
            handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
        break;

    case EXPRESS_IOCTL_VDA:
        err = esas2r_write_vda(a,
                               (char *)&ioctl->data.ioctl_vda,
                               0,
                               sizeof(struct atto_ioctl_vda) +
                               ioctl->data.ioctl_vda.data_length);

        if (err >= 0) {
            err = esas2r_read_vda(a,
                                  (char *)&ioctl->data.ioctl_vda,
                                  0,
                                  sizeof(struct atto_ioctl_vda) +
                                  ioctl->data.ioctl_vda.data_length);
        }

        break;

    case EXPRESS_IOCTL_GET_MOD_INFO:

        ioctl->data.modinfo.adapter = a;
        ioctl->data.modinfo.pci_dev = a->pcid;
        ioctl->data.modinfo.scsi_host = a->host;
        ioctl->data.modinfo.host_no = a->host->host_no;

        break;

    default:
        esas2r_debug("esas2r_ioctl invalid cmd %u!", cmd);
        ioctl->header.return_code = IOCTL_ERR_INVCMD;
    }

ioctl_done:

    if (err < 0) {
        esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
                   cmd);

        switch (err) {
        case -ENOMEM:
        case -EBUSY:
            ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
            break;

        case -ENOSYS:
        case -EINVAL:
            ioctl->header.return_code = IOCTL_INVALID_PARAM;
            break;

        default:
            ioctl->header.return_code = IOCTL_GENERAL_ERROR;
            break;
        }
    }

    /* Always copy the buffer back, if only to pick up the status */
    err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
    if (err != 0) {
        esas2r_log(ESAS2R_LOG_WARN,
                   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
                   err, cmd);
        kfree(ioctl);

        return -EFAULT;
    }

    kfree(ioctl);

    return 0;
}

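/* Forward a scsi_device ioctl to the common handler. */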
int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
{
    return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

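/* Free the coherent buffer used for firmware image transfers, if any. */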
static void free_fw_buffers(struct esas2r_adapter *a)
{
    if (a->firmware.data) {
        dma_free_coherent(&a->pcid->dev,
                          (size_t)a->firmware.orig_len,
                          a->firmware.data,
                          (dma_addr_t)a->firmware.phys);

        a->firmware.data = NULL;
    }
}

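/*
 * Replace the firmware transfer buffer with a coherent buffer of the given
 * length.  Returns 1 on success, 0 if the allocation failed.
 */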
1545 static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
1546 {
1547     free_fw_buffers(a);
1548 
1549     a->firmware.orig_len = length;
1550 
1551     a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
1552                           (size_t)length,
1553                           (dma_addr_t *)&a->firmware.phys,
1554                           GFP_KERNEL);
1555 
1556     if (!a->firmware.data) {
1557         esas2r_debug("buffer alloc failed!");
1558         return 0;
1559     }
1560 
1561     return 1;
1562 }
1563 
1564 /* Handle a call to read firmware. */
1565 int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
1566 {
1567     esas2r_trace_enter();
1568     /* if the cached header is a status, simply copy it over and return. */
1569     if (a->firmware.state == FW_STATUS_ST) {
1570         int size = min_t(int, count, sizeof(a->firmware.header));
1571         esas2r_trace_exit();
1572         memcpy(buf, &a->firmware.header, size);
1573         esas2r_debug("esas2r_read_fw: STATUS size %d", size);
1574         return size;
1575     }
1576 
1577     /*
1578      * if the cached header is a command, do it if at
1579      * offset 0, otherwise copy the pieces.
1580      */
1581 
1582     if (a->firmware.state == FW_COMMAND_ST) {
1583         u32 length = a->firmware.header.length;
1584         esas2r_trace_exit();
1585 
1586         esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
1587                  length,
1588                  off);
1589 
1590         if (off == 0) {
1591             if (a->firmware.header.action == FI_ACT_UP) {
1592                 if (!allocate_fw_buffers(a, length))
1593                     return -ENOMEM;
1594 
1595 
1596                 /* copy header over */
1597 
1598                 memcpy(a->firmware.data,
1599                        &a->firmware.header,
1600                        sizeof(a->firmware.header));
1601 
1602                 do_fm_api(a,
1603                       (struct esas2r_flash_img *)a->firmware.data);
1604             } else if (a->firmware.header.action == FI_ACT_UPSZ) {
1605                 int size =
1606                     min((int)count,
1607                         (int)sizeof(a->firmware.header));
1608                 do_fm_api(a, &a->firmware.header);
1609                 memcpy(buf, &a->firmware.header, size);
1610                 esas2r_debug("FI_ACT_UPSZ size %d", size);
1611                 return size;
1612             } else {
1613                 esas2r_debug("invalid action %d",
1614                          a->firmware.header.action);
1615                 return -ENOSYS;
1616             }
1617         }
1618 
1619         if (count + off > length)
1620             count = length - off;
1621 
1622         if (count < 0)
1623             return 0;
1624 
1625         if (!a->firmware.data) {
1626             esas2r_debug(
1627                 "read: nonzero offset but no buffer available!");
1628             return -ENOMEM;
1629         }
1630 
1631         esas2r_debug("esas2r_read_fw: off %ld count %d length %d", off,
1632                  count,
1633                  length);
1634 
1635         memcpy(buf, &a->firmware.data[off], count);
1636 
1637         /* when done, release the buffer */
1638 
1639         if (length <= off + count) {
1640             esas2r_debug("esas2r_read_fw: freeing buffer!");
1641 
1642             free_fw_buffers(a);
1643         }
1644 
1645         return count;
1646     }
1647 
1648     esas2r_trace_exit();
1649     esas2r_debug("esas2r_read_fw: invalid firmware state %d",
1650              a->firmware.state);
1651 
1652     return -EINVAL;
1653 }
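
/*
 * A summary of the firmware state machine driven by esas2r_read_fw()
 * above and esas2r_write_fw() below:
 *
 *   FW_INVALID_ST - nothing cached; reads fail with -EINVAL.
 *   FW_COMMAND_ST - esas2r_write_fw() cached a command header; a read
 *                   at offset 0 executes it (FI_ACT_UP uploads the
 *                   flash image, FI_ACT_UPSZ returns just the header)
 *                   and later reads copy out the buffered data.
 *   FW_STATUS_ST  - a download completed; reads return the result
 *                   header that do_fm_api() filled in.
 */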
1654 
1655 /* Handle a call to write firmware. */
1656 int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
1657             int count)
1658 {
1659     u32 length;
1660 
1661     if (off == 0) {
1662         struct esas2r_flash_img *header =
1663             (struct esas2r_flash_img *)buf;
1664 
1665         /* assume version 0 flash image */
1666 
1667         int min_size = sizeof(struct esas2r_flash_img_v0);
1668 
1669         a->firmware.state = FW_INVALID_ST;
1670 
1671         /* validate the version field first */
1672 
1673         if (count < 4 ||
1674             header->fi_version > FI_VERSION_1) {
1675             esas2r_debug(
1676                 "esas2r_write_fw: short header or invalid version");
1677             return -EINVAL;
1678         }
1679 
1680         /* See if it's a version 1 flash image */
1681 
1682         if (header->fi_version == FI_VERSION_1)
1683             min_size = sizeof(struct esas2r_flash_img);
1684 
1685         /* If this is the start, the header must be full and valid. */
1686         if (count < min_size) {
1687             esas2r_debug("esas2r_write_fw: short header, aborting");
1688             return -EINVAL;
1689         }
1690 
1691         /* Make sure the size is reasonable. */
1692         length = header->length;
1693 
1694         if (length > 1024 * 1024) {
1695             esas2r_debug(
1696                 "esas2r_write_fw: hosed, length %d  fi_version %d",
1697                 length, header->fi_version);
1698             return -EINVAL;
1699         }
1700 
1701         /*
1702          * If this is a download command, allocate memory because
1703          * we have to cache everything.  Otherwise, just cache
1704          * the header, because the read op will do the command.
1705          */
1706 
1707         if (header->action == FI_ACT_DOWN) {
1708             if (!allocate_fw_buffers(a, length))
1709                 return -ENOMEM;
1710 
1711             /*
1712              * Store the command, so there is context on subsequent
1713              * calls.
1714              */
1715             memcpy(&a->firmware.header,
1716                    buf,
1717                    sizeof(*header));
1718         } else if (header->action == FI_ACT_UP ||
1719                header->action == FI_ACT_UPSZ) {
1720             /* Save the command, result will be picked up on read */
1721             memcpy(&a->firmware.header,
1722                    buf,
1723                    sizeof(*header));
1724 
1725             a->firmware.state = FW_COMMAND_ST;
1726 
1727             esas2r_debug(
1728                 "esas2r_write_fw: COMMAND, count %d, action %d",
1729                 count, header->action);
1730 
1731             /*
1732              * Pretend we took the whole buffer,
1733              * so we don't get bothered again.
1734              */
1735 
1736             return count;
1737         } else {
1738             esas2r_debug("esas2r_write_fw: invalid action %d",
1739                      header->action);
1740             return -ENOSYS;
1741         }
1742     } else {
1743         length = a->firmware.header.length;
1744     }
1745 
1746     /*
1747      * We only get here on a download command, regardless of offset.
1748      * The chunks written by the system need to be cached, and when
1749      * the final one arrives, we issue the FM API command.
1750      */
1751 
1752     if (off + count > length)
1753         count = length - off;
1754 
1755     if (count > 0) {
1756         esas2r_debug("esas2r_write_fw: off %ld count %d length %d", off,
1757                  count,
1758                  length);
1759 
1760         /*
1761          * On a full upload, the system tries sending the whole buffer.
1762          * There's nothing to do with it, so just drop it here before
1763          * trying to copy into unallocated memory!
1764          */
1765         if (a->firmware.header.action == FI_ACT_UP)
1766             return count;
1767 
1768         if (!a->firmware.data) {
1769             esas2r_debug(
1770                 "write: nonzero offset but no buffer available!");
1771             return -ENOMEM;
1772         }
1773 
1774         memcpy(&a->firmware.data[off], buf, count);
1775 
1776         if (length == off + count) {
1777             do_fm_api(a,
1778                   (struct esas2r_flash_img *)a->firmware.data);
1779 
1780             /*
1781              * Now copy the header result to be picked up by the
1782              * next read
1783              */
1784             memcpy(&a->firmware.header,
1785                    a->firmware.data,
1786                    sizeof(a->firmware.header));
1787 
1788             a->firmware.state = FW_STATUS_ST;
1789 
1790             esas2r_debug("write completed");
1791 
1792             /*
1793              * Since the system has the data buffered, the only way
1794              * this can leak is if a root user writes a program
1795              * that writes a shorter buffer than it claims, and the
1796              * copyin fails.
1797              */
1798             free_fw_buffers(a);
1799         }
1800     }
1801 
1802     return count;
1803 }
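
/*
 * For illustration only: a user-space sketch of the download flow
 * above.  It assumes these handlers back a binary sysfs attribute (the
 * exact path, shown here as /sys/class/scsi_host/host0/fw, is
 * hypothetical), and that "img"/"img_len" hold a complete
 * esas2r_flash_img whose header.action is FI_ACT_DOWN:
 *
 *	int fd = open("/sys/class/scsi_host/host0/fw", O_RDWR);
 *	off_t off = 0;
 *
 *	while (off < img_len) {
 *		// the first write must cover the whole header
 *		ssize_t n = pwrite(fd, img + off, img_len - off, off);
 *		if (n <= 0)
 *			break;
 *		off += n;	// the final chunk triggers do_fm_api()
 *	}
 *	// the result header is now cached (FW_STATUS_ST)
 *	pread(fd, &result, sizeof(result), 0);
 *	close(fd);
 */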
1804 
1805 /* Callback for the completion of a VDA request. */
1806 static void vda_complete_req(struct esas2r_adapter *a,
1807                  struct esas2r_request *rq)
1808 {
1809     a->vda_command_done = 1;
1810     wake_up_interruptible(&a->vda_waiter);
1811 }
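
/*
 * The issuing path below pairs with this callback using a simple
 * flag-plus-waitqueue handshake: it clears vda_command_done, starts
 * the request, and sleeps in wait_event_interruptible() (looping so an
 * interrupted sleep just waits again) until this completion callback
 * sets the flag and wakes a->vda_waiter.
 */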
1812 
1813 /* Scatter/gather callback for VDA requests */
1814 static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
1815 {
1816     struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1817     int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
1818 
1819     (*addr) = a->ppvda_buffer + offset;
1820     return VDA_MAX_BUFFER_SIZE - offset;
1821 }
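
/*
 * The S/G table builder invokes this via sgc->get_phys_addr: given the
 * current virtual position within the coherent VDA buffer, it returns
 * the corresponding bus address through *addr along with the number of
 * contiguous bytes remaining.  Because the VDA buffer is one coherent
 * allocation, a single region covers the whole transfer.
 */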
1822 
1823 /* Handle a call to read a VDA command. */
1824 int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
1825 {
1826     if (!a->vda_buffer)
1827         return -ENOMEM;
1828 
1829     if (off == 0) {
1830         struct esas2r_request *rq;
1831         struct atto_ioctl_vda *vi =
1832             (struct atto_ioctl_vda *)a->vda_buffer;
1833         struct esas2r_sg_context sgc;
1834         bool wait_for_completion;
1835 
1836         /*
1837          * Presumably, someone has already written a command to the
1838          * vda_buffer and is now reading back the response, so at this
1839          * point we actually issue the request to the chip and reply.
1840          */
1841 
1842         /* allocate a request */
1843         rq = esas2r_alloc_request(a);
1844         if (rq == NULL) {
1845             esas2r_debug("esas2r_read_vda: out of requests");
1846             return -EBUSY;
1847         }
1848 
1849         rq->comp_cb = vda_complete_req;
1850 
1851         sgc.first_req = rq;
1852         sgc.adapter = a;
1853         sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
1854         sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
1855 
1856         a->vda_command_done = 0;
1857 
1858         wait_for_completion =
1859             esas2r_process_vda_ioctl(a, vi, rq, &sgc);
1860 
1861         if (wait_for_completion) {
1862             /* now wait around for it to complete. */
1863 
1864             while (!a->vda_command_done)
1865                 wait_event_interruptible(a->vda_waiter,
1866                              a->vda_command_done);
1867         }
1868 
1869         esas2r_free_request(a, rq);
1870     }
1871 
1872     if (off > VDA_MAX_BUFFER_SIZE)
1873         return 0;
1874 
1875     if (count + off > VDA_MAX_BUFFER_SIZE)
1876         count = VDA_MAX_BUFFER_SIZE - off;
1877 
1878     if (count < 0)
1879         return 0;
1880 
1881     memcpy(buf, a->vda_buffer + off, count);
1882 
1883     return count;
1884 }
1885 
1886 /* Handle a call to write a VDA command. */
1887 int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
1888              int count)
1889 {
1890     /*
1891      * Allocate memory for it if not already done.  Once allocated,
1892      * we will keep it around until the driver is unloaded.
1893      */
1894 
1895     if (!a->vda_buffer) {
1896         dma_addr_t dma_addr;
1897         a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
1898                            (size_t)
1899                            VDA_MAX_BUFFER_SIZE,
1900                            &dma_addr,
1901                            GFP_KERNEL);
1902 
1903         a->ppvda_buffer = dma_addr;
1904     }
1905 
1906     if (!a->vda_buffer)
1907         return -ENOMEM;
1908 
1909     if (off > VDA_MAX_BUFFER_SIZE)
1910         return 0;
1911 
1912     if (count + off > VDA_MAX_BUFFER_SIZE)
1913         count = VDA_MAX_BUFFER_SIZE - off;
1914 
1915     if (count < 1)
1916         return 0;
1917 
1918     memcpy(a->vda_buffer + off, buf, count);
1919 
1920     return count;
1921 }
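
/*
 * Illustrative VDA handshake from user space, under the same
 * hypothetical sysfs-attribute assumption as the firmware example
 * above:
 *
 *	struct atto_ioctl_vda cmd = { ... };	// fill in the request
 *	pwrite(fd, &cmd, sizeof(cmd), 0);	// cached in vda_buffer
 *	pread(fd, &cmd, sizeof(cmd), 0);	// issues the command and
 *						// copies the buffer back
 */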
1922 
1923 /* Callback for the completion of an FS_API request. */
1924 static void fs_api_complete_req(struct esas2r_adapter *a,
1925                 struct esas2r_request *rq)
1926 {
1927     a->fs_api_command_done = 1;
1928 
1929     wake_up_interruptible(&a->fs_api_waiter);
1930 }
1931 
1932 /* Scatter/gather callback for FS_API requests */
1933 static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
1934 {
1935     struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1936     struct esas2r_ioctl_fs *fs =
1937         (struct esas2r_ioctl_fs *)a->fs_api_buffer;
1938     u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
1939 
1940     (*addr) = a->ppfs_api_buffer + offset;
1941 
1942     return a->fs_api_buffer_size - offset;
1943 }
1944 
1945 /* Handle a call to read firmware via FS_API. */
1946 int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
1947 {
1948     if (!a->fs_api_buffer)
1949         return -ENOMEM;
1950 
1951     if (off == 0) {
1952         struct esas2r_request *rq;
1953         struct esas2r_sg_context sgc;
1954         struct esas2r_ioctl_fs *fs =
1955             (struct esas2r_ioctl_fs *)a->fs_api_buffer;
1956 
1957         /* If another flash request is already in progress, return. */
1958         if (mutex_lock_interruptible(&a->fs_api_mutex)) {
1959 busy:
1960             fs->status = ATTO_STS_OUT_OF_RSRC;
1961             return -EBUSY;
1962         }
1963 
1964         /*
1965          * Presumably, someone has already written a command to the
1966          * fs_api_buffer and is now reading back the response, so at
1967          * this point we actually issue the request to the chip and
1968          * reply.  Allocate a request first.
1969          */
1970 
1971         rq = esas2r_alloc_request(a);
1972         if (rq == NULL) {
1973             esas2r_debug("esas2r_read_fs: out of requests");
1974             mutex_unlock(&a->fs_api_mutex);
1975             goto busy;
1976         }
1977 
1978         rq->comp_cb = fs_api_complete_req;
1979 
1980         /* Set up the SGCONTEXT to build the s/g table */
1981 
1982         sgc.cur_offset = fs->data;
1983         sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
1984 
1985         a->fs_api_command_done = 0;
1986 
1987         if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
1988             if (fs->status == ATTO_STS_OUT_OF_RSRC)
1989                 count = -EBUSY;
1990 
1991             goto dont_wait;
1992         }
1993 
1994         /* Now wait around for it to complete. */
1995 
1996         while (!a->fs_api_command_done)
1997             wait_event_interruptible(a->fs_api_waiter,
1998                          a->fs_api_command_done);
2000 dont_wait:
2001         /* Free the request and keep going */
2002         mutex_unlock(&a->fs_api_mutex);
2003     esas2r_free_request(a, rq);
2004 
2005         /* Pick up possible error code from above */
2006         if (count < 0)
2007             return count;
2008     }
2009 
2010     if (off > a->fs_api_buffer_size)
2011         return 0;
2012 
2013     if (count + off > a->fs_api_buffer_size)
2014         count = a->fs_api_buffer_size - off;
2015 
2016     if (count < 0)
2017         return 0;
2018 
2019     memcpy(buf, a->fs_api_buffer + off, count);
2020 
2021     return count;
2022 }
2023 
2024 /* Handle a call to write firmware via FS_API. */
2025 int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
2026             int count)
2027 {
2028     if (off == 0) {
2029         struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
2030         u32 length = fs->command.length + offsetof(
2031             struct esas2r_ioctl_fs,
2032             data);
2033 
2034         /*
2035          * Special case: for BEGIN commands, the length field does not
2036          * reflect the actual data size, so just get enough for the header.
2037          */
2038 
2039         if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
2040             length = offsetof(struct esas2r_ioctl_fs, data);
2041 
2042         /*
2043          * Beginning a command.  We assume we'll get at least
2044          * enough in the first write so we can look at the
2045          * header and see how much we need to alloc.
2046          */
2047 
2048         if (count < offsetof(struct esas2r_ioctl_fs, data))
2049             return -EINVAL;
2050 
2051         /* Allocate a buffer or use the existing buffer. */
2052         if (a->fs_api_buffer) {
2053             if (a->fs_api_buffer_size < length) {
2054                 /* Free too-small buffer and get a new one */
2055                 dma_free_coherent(&a->pcid->dev,
2056                           (size_t)a->fs_api_buffer_size,
2057                           a->fs_api_buffer,
2058                           (dma_addr_t)a->ppfs_api_buffer);
2059 
2060                 goto re_allocate_buffer;
2061             }
2062         } else {
2063 re_allocate_buffer:
2064             a->fs_api_buffer_size = length;
2065 
2066             a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
2067                                   (size_t)a->fs_api_buffer_size,
2068                                   (dma_addr_t *)&a->ppfs_api_buffer,
2069                                   GFP_KERNEL);
2070         }
2071     }
2072 
2073     if (!a->fs_api_buffer)
2074         return -ENOMEM;
2075 
2076     if (off > a->fs_api_buffer_size)
2077         return 0;
2078 
2079     if (count + off > a->fs_api_buffer_size)
2080         count = a->fs_api_buffer_size - off;
2081 
2082     if (count < 1)
2083         return 0;
2084 
2085     memcpy(a->fs_api_buffer + off, buf, count);
2086 
2087     return count;
2088 }
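
/*
 * Note on the FS_API buffer strategy above: the buffer is sized from
 * the command header on the first write of a request and is only
 * reallocated when a later command needs more room than the current
 * allocation; otherwise the same coherent buffer is reused across
 * requests.
 */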