/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

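/* REQUEST_STATES is an X-macro list whose entries are wrapped in C();
 * redefining C() as a stringifier expands the list into a table of
 * state-name strings indexed by enum sci_base_request_states.
 */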
#undef C
#define C(a) (#a)
const char *req_state_name(enum sci_base_request_states state)
{
    static const char * const strings[] = REQUEST_STATES;

    return strings[state];
}
#undef C

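/* The first two SGL element pairs of a request are embedded in the task
 * context itself (sgl_pair_ab and sgl_pair_cd); any further pairs live in
 * the request's external sg_table, hence the "idx - 2" below.
 */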
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
                            int idx)
{
    if (idx == 0)
        return &ireq->tc->sgl_pair_ab;
    else if (idx == 1)
        return &ireq->tc->sgl_pair_cd;
    else if (idx < 0)
        return NULL;
    else
        return &ireq->sg_table[idx - 2];
}

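/* Convert an SGL pair index to the DMA (bus) address the hardware must use
 * to reach that pair: embedded pairs are located at an offset from the task
 * context table's DMA base, external pairs via the request's own mapping.
 */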
static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
                      struct isci_request *ireq, u32 idx)
{
    u32 offset;

    if (idx == 0) {
        offset = (void *) &ireq->tc->sgl_pair_ab -
             (void *) &ihost->task_context_table[0];
        return ihost->tc_dma + offset;
    } else if (idx == 1) {
        offset = (void *) &ireq->tc->sgl_pair_cd -
             (void *) &ihost->task_context_table[0];
        return ihost->tc_dma + offset;
    }

    return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
    e->length = sg_dma_len(sg);
    e->address_upper = upper_32_bits(sg_dma_address(sg));
    e->address_lower = lower_32_bits(sg_dma_address(sg));
    e->address_modifier = 0;
}

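/* Walk the OS scatterlist two elements at a time, filling the A/B halves of
 * each SCU SGL pair and chaining pairs together through their
 * next_pair_upper/lower DMA pointers.  A request with no scatterlist is
 * mapped as a single contiguous buffer with dma_map_single().
 */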
static void sci_request_build_sgl(struct isci_request *ireq)
{
    struct isci_host *ihost = ireq->isci_host;
    struct sas_task *task = isci_request_access_task(ireq);
    struct scatterlist *sg = NULL;
    dma_addr_t dma_addr;
    u32 sg_idx = 0;
    struct scu_sgl_element_pair *scu_sg   = NULL;
    struct scu_sgl_element_pair *prev_sg  = NULL;

    if (task->num_scatter > 0) {
        sg = task->scatter;

        while (sg) {
            scu_sg = to_sgl_element_pair(ireq, sg_idx);
            init_sgl_element(&scu_sg->A, sg);
            sg = sg_next(sg);
            if (sg) {
                init_sgl_element(&scu_sg->B, sg);
                sg = sg_next(sg);
            } else
                memset(&scu_sg->B, 0, sizeof(scu_sg->B));

            if (prev_sg) {
                dma_addr = to_sgl_element_pair_dma(ihost,
                                   ireq,
                                   sg_idx);

                prev_sg->next_pair_upper =
                    upper_32_bits(dma_addr);
                prev_sg->next_pair_lower =
                    lower_32_bits(dma_addr);
            }

            prev_sg = scu_sg;
            sg_idx++;
        }
    } else {    /* handle when no sg */
        scu_sg = to_sgl_element_pair(ireq, sg_idx);

        dma_addr = dma_map_single(&ihost->pdev->dev,
                      task->scatter,
                      task->total_xfer_len,
                      task->data_dir);

        ireq->zero_scatter_daddr = dma_addr;

        scu_sg->A.length = task->total_xfer_len;
        scu_sg->A.address_upper = upper_32_bits(dma_addr);
        scu_sg->A.address_lower = lower_32_bits(dma_addr);
    }

    if (scu_sg) {
        scu_sg->next_pair_upper = 0;
        scu_sg->next_pair_lower = 0;
    }
}

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
    struct ssp_cmd_iu *cmd_iu;
    struct sas_task *task = isci_request_access_task(ireq);

    cmd_iu = &ireq->ssp.cmd;

    memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
    cmd_iu->add_cdb_len = 0;
    cmd_iu->_r_a = 0;
    cmd_iu->_r_b = 0;
    cmd_iu->en_fburst = 0; /* unsupported */
    cmd_iu->task_prio = task->ssp_task.task_prio;
    cmd_iu->task_attr = task->ssp_task.task_attr;
    cmd_iu->_r_c = 0;

    sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
               (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
    struct ssp_task_iu *task_iu;
    struct sas_task *task = isci_request_access_task(ireq);
    struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

    task_iu = &ireq->ssp.tmf;

    memset(task_iu, 0, sizeof(struct ssp_task_iu));

    memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

    task_iu->task_func = isci_tmf->tmf_code;
    task_iu->task_tag =
        (test_bit(IREQ_TMF, &ireq->flags)) ?
        isci_tmf->io_tag :
        SCI_CONTROLLER_INVALID_IO_TAG;
}

/*
 * This method will fill in the SCU Task Context for any type of SSP request.
 */
static void scu_ssp_request_construct_task_context(
    struct isci_request *ireq,
    struct scu_task_context *task_context)
{
    dma_addr_t dma_addr;
    struct isci_remote_device *idev;
    struct isci_port *iport;

    idev = ireq->target_device;
    iport = idev->owning_port;

    /* Fill in the TC with its required data */
    task_context->abort = 0;
    task_context->priority = 0;
    task_context->initiator_request = 1;
    task_context->connection_rate = idev->connection_rate;
    task_context->protocol_engine_index = ISCI_PEG;
    task_context->logical_port_index = iport->physical_port_index;
    task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
    task_context->valid = SCU_TASK_CONTEXT_VALID;
    task_context->context_type = SCU_TASK_CONTEXT_TYPE;

    task_context->remote_node_index = idev->rnc.remote_node_index;
    task_context->command_code = 0;

    task_context->link_layer_control = 0;
    task_context->do_not_dma_ssp_good_response = 1;
    task_context->strict_ordering = 0;
    task_context->control_frame = 0;
    task_context->timeout_enable = 0;
    task_context->block_guard_enable = 0;

    task_context->address_modifier = 0;

    /* task_context->type.ssp.tag = ireq->io_tag; */
    task_context->task_phase = 0x01;

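    /* Pack the protocol engine group, logical port and task context index
     * into the value used when posting this request to the hardware.
     */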
    ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                  (iport->physical_port_index <<
                   SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                  ISCI_TAG_TCI(ireq->io_tag));

    /*
     * Copy the physical address for the command buffer to the
     * SCU Task Context
     */
    dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

    task_context->command_iu_upper = upper_32_bits(dma_addr);
    task_context->command_iu_lower = lower_32_bits(dma_addr);

    /*
     * Copy the physical address for the response buffer to the
     * SCU Task Context
     */
    dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

    task_context->response_iu_upper = upper_32_bits(dma_addr);
    task_context->response_iu_lower = lower_32_bits(dma_addr);
}

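/* Encode the logical block size for the block guard (DIF) hardware:
 * 512 -> 0, 1024 -> 1, 4096 -> 3; anything else is unsupported (0xff).
 */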
static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
    switch (sdp->sector_size) {
    case 512:
        return 0;
    case 1024:
        return 1;
    case 4096:
        return 3;
    default:
        return 0xff;
    }
}

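/* Eight bytes of DIF protection information are added per sector, so the
 * extra byte count is (number of sectors) * 8.
 */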
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
    return (len >> ilog2(sector_size)) * 8;
}

static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
    struct scu_task_context *tc = ireq->tc;
    struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
    u8 blk_sz = scu_bg_blk_size(scmd->device);

    tc->block_guard_enable = 1;
    tc->blk_prot_en = 1;
    tc->blk_sz = blk_sz;
    /* DIF write insert */
    tc->blk_prot_func = 0x2;

    tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
                           scmd->device->sector_size);

    /* always init to 0, used by hw */
    tc->interm_crc_val = 0;

    tc->init_crc_seed = 0;
    tc->app_tag_verify = 0;
    tc->app_tag_gen = 0;
    tc->ref_tag_seed_verify = 0;

    /* always init to same as bg_blk_sz */
    tc->UD_bytes_immed_val = scmd->device->sector_size;

    tc->reserved_DC_0 = 0;

    /* always init to 8 */
    tc->DIF_bytes_immed_val = 8;

    tc->reserved_DC_1 = 0;
    tc->bgc_blk_sz = scmd->device->sector_size;
    tc->reserved_E0_0 = 0;
    tc->app_tag_gen_mask = 0;

    /* setup block guard control */
    tc->bgctl = 0;

    /* DIF write insert */
    tc->bgctl_f.op = 0x2;

    tc->app_tag_verify_mask = 0;

    /* must init to 0 for hw */
    tc->blk_guard_err = 0;

    tc->reserved_E8_0 = 0;

    if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
        tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
    else if (type & SCSI_PROT_DIF_TYPE3)
        tc->ref_tag_seed_gen = 0;
}

static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
    struct scu_task_context *tc = ireq->tc;
    struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
    u8 blk_sz = scu_bg_blk_size(scmd->device);

    tc->block_guard_enable = 1;
    tc->blk_prot_en = 1;
    tc->blk_sz = blk_sz;
    /* DIF read strip */
    tc->blk_prot_func = 0x1;

    tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
                           scmd->device->sector_size);

    /* always init to 0, used by hw */
    tc->interm_crc_val = 0;

    tc->init_crc_seed = 0;
    tc->app_tag_verify = 0;
    tc->app_tag_gen = 0;

    if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
        tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
    else if (type & SCSI_PROT_DIF_TYPE3)
        tc->ref_tag_seed_verify = 0;

    /* always init to same as bg_blk_sz */
    tc->UD_bytes_immed_val = scmd->device->sector_size;

    tc->reserved_DC_0 = 0;

    /* always init to 8 */
    tc->DIF_bytes_immed_val = 8;

    tc->reserved_DC_1 = 0;
    tc->bgc_blk_sz = scmd->device->sector_size;
    tc->reserved_E0_0 = 0;
    tc->app_tag_gen_mask = 0;

    /* setup block guard control */
    tc->bgctl = 0;

    /* DIF read strip */
    tc->bgctl_f.crc_verify = 1;
    tc->bgctl_f.op = 0x1;
    if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
        tc->bgctl_f.ref_tag_chk = 1;
        tc->bgctl_f.app_f_detect = 1;
    } else if (type & SCSI_PROT_DIF_TYPE3)
        tc->bgctl_f.app_ref_f_detect = 1;

    tc->app_tag_verify_mask = 0;

    /* must init to 0 for hw */
    tc->blk_guard_err = 0;

    tc->reserved_E8_0 = 0;
    tc->ref_tag_seed_gen = 0;
}

/*
 * This method will fill in the SCU Task Context for an SSP IO request.
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
                              enum dma_data_direction dir,
                              u32 len)
{
    struct scu_task_context *task_context = ireq->tc;
    struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
    struct scsi_cmnd *scmd = sas_task->uldd_task;
    u8 prot_type = scsi_get_prot_type(scmd);
    u8 prot_op = scsi_get_prot_op(scmd);

    scu_ssp_request_construct_task_context(ireq, task_context);

    task_context->ssp_command_iu_length =
        sizeof(struct ssp_cmd_iu) / sizeof(u32);
    task_context->type.ssp.frame_type = SSP_COMMAND;

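    /* DMA_NONE and any unrecognized direction are treated as reads; only
     * DMA_TO_DEVICE selects the write task type.
     */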
    switch (dir) {
    case DMA_FROM_DEVICE:
    case DMA_NONE:
    default:
        task_context->task_type = SCU_TASK_TYPE_IOREAD;
        break;
    case DMA_TO_DEVICE:
        task_context->task_type = SCU_TASK_TYPE_IOWRITE;
        break;
    }

    task_context->transfer_length_bytes = len;

    if (task_context->transfer_length_bytes > 0)
        sci_request_build_sgl(ireq);

    if (prot_type != SCSI_PROT_DIF_TYPE0) {
        if (prot_op == SCSI_PROT_READ_STRIP)
            scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
        else if (prot_op == SCSI_PROT_WRITE_INSERT)
            scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
    }
}

/**
 * scu_ssp_task_request_construct_task_context() - This method will fill in
 *    the SCU Task Context for an SSP Task request.  The following important
 *    settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH.  This
 *    ensures that the task request is issued ahead of other tasks destined
 *    for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD.  This
 *    simply indicates that a normal request type (i.e. non-raw frame) is
 *    being utilized to perform task management. -# control_frame == 1.  This
 *    ensures that the proper endianness is set so that the bytes are
 *    transmitted in the right order for a task frame.
 * @ireq: This parameter specifies the task request object being constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
    struct scu_task_context *task_context = ireq->tc;

    scu_ssp_request_construct_task_context(ireq, task_context);

    task_context->control_frame                = 1;
    task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
    task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
    task_context->transfer_length_bytes        = 0;
    task_context->type.ssp.frame_type          = SSP_TASK;
    task_context->ssp_command_iu_length =
        sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context()
 * This method will fill in the SCU Task Context for any type of SATA
 *    request.  It is called from the various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete. The buffer assignment for
 * the command buffer is complete. Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
    struct isci_request *ireq,
    struct scu_task_context *task_context)
{
    dma_addr_t dma_addr;
    struct isci_remote_device *idev;
    struct isci_port *iport;

    idev = ireq->target_device;
    iport = idev->owning_port;

    /* Fill in the TC with its required data */
    task_context->abort = 0;
    task_context->priority = SCU_TASK_PRIORITY_NORMAL;
    task_context->initiator_request = 1;
    task_context->connection_rate = idev->connection_rate;
    task_context->protocol_engine_index = ISCI_PEG;
    task_context->logical_port_index = iport->physical_port_index;
    task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
    task_context->valid = SCU_TASK_CONTEXT_VALID;
    task_context->context_type = SCU_TASK_CONTEXT_TYPE;

    task_context->remote_node_index = idev->rnc.remote_node_index;
    task_context->command_code = 0;

    task_context->link_layer_control = 0;
    task_context->do_not_dma_ssp_good_response = 1;
    task_context->strict_ordering = 0;
    task_context->control_frame = 0;
    task_context->timeout_enable = 0;
    task_context->block_guard_enable = 0;

    task_context->address_modifier = 0;
    task_context->task_phase = 0x01;

    task_context->ssp_command_iu_length =
        (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

    /* Set the first word of the H2D REG FIS */
    task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

    ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
                  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
                  (iport->physical_port_index <<
                   SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
                  ISCI_TAG_TCI(ireq->io_tag));
    /*
     * Copy the physical address for the command buffer to the SCU Task
     * Context. We must offset the command buffer by 4 bytes because the
     * first 4 bytes are transferred in the body of the TC.
     */
    dma_addr = sci_io_request_get_dma_addr(ireq,
                        ((char *) &ireq->stp.cmd) +
                        sizeof(u32));

    task_context->command_iu_upper = upper_32_bits(dma_addr);
    task_context->command_iu_lower = lower_32_bits(dma_addr);

    /* SATA Requests do not have a response buffer */
    task_context->response_iu_upper = 0;
    task_context->response_iu_lower = 0;
}

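/* Construct a raw-frame task context that transmits the H2D register FIS
 * directly; the first dword of the FIS travels in the task context itself,
 * so the transfer length excludes it.
 */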
static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
    struct scu_task_context *task_context = ireq->tc;

    scu_sata_request_construct_task_context(ireq, task_context);

    task_context->control_frame         = 0;
    task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
    task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
    task_context->type.stp.fis_type     = FIS_REGH2D;
    task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
                              bool copy_rx_frame)
{
    struct isci_stp_request *stp_req = &ireq->stp.req;

    scu_stp_raw_request_construct_task_context(ireq);

    stp_req->status = 0;
    stp_req->sgl.offset = 0;
    stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

    if (copy_rx_frame) {
        sci_request_build_sgl(ireq);
        stp_req->sgl.index = 0;
    } else {
        /* The user does not want the data copied to the SGL buffer location */
        stp_req->sgl.index = -1;
    }

    return SCI_SUCCESS;
}

/*
 * sci_stp_optimized_request_construct()
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
 *    value of 1 indicates NCQ.
 *
 * This method performs request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
                             u8 optimized_task_type,
                             u32 len,
                             enum dma_data_direction dir)
{
    struct scu_task_context *task_context = ireq->tc;

    /* Build the STP task context structure */
    scu_sata_request_construct_task_context(ireq, task_context);

    /* Copy over the SGL elements */
    sci_request_build_sgl(ireq);

    /* Copy over the number of bytes to be transferred */
    task_context->transfer_length_bytes = len;

    if (dir == DMA_TO_DEVICE) {
        /*
         * The difference between the DMA IN and DMA OUT request task type
         * values is consistent with the difference between FPDMA READ
         * and FPDMA WRITE values.  Add the supplied task type parameter
         * to this difference to set the task type properly for this
         * DATA OUT (WRITE) case. */
        task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
                                 - SCU_TASK_TYPE_DMA_IN);
    } else {
        /*
         * For the DATA IN (READ) case, simply save the supplied
         * optimized task type. */
        task_context->task_type = optimized_task_type;
    }
}

static void sci_atapi_construct(struct isci_request *ireq)
{
    struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
    struct sas_task *task;

    /* To simplify the implementation we take advantage of the
     * silicon's partial acceleration of atapi protocol (dma data
     * transfers), so we promote all commands to dma protocol.  This
     * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
     */
    h2d_fis->features |= ATAPI_PKT_DMA;

    scu_stp_raw_request_construct_task_context(ireq);

    task = isci_request_access_task(ireq);
    if (task->data_dir == DMA_NONE)
        task->total_xfer_len = 0;

    /* clear the response so we can detect arrival of an
     * unsolicited h2d fis
     */
    ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
                   u32 len,
                   enum dma_data_direction dir,
                   bool copy)
{
    enum sci_status status = SCI_SUCCESS;
    struct sas_task *task = isci_request_access_task(ireq);
    struct domain_device *dev = ireq->target_device->domain_dev;

    /* check for management protocols */
    if (test_bit(IREQ_TMF, &ireq->flags)) {
        struct isci_tmf *tmf = isci_request_access_tmf(ireq);

        dev_err(&ireq->owning_controller->pdev->dev,
            "%s: Request 0x%p received un-handled SAT "
            "management protocol 0x%x.\n",
            __func__, ireq, tmf->tmf_code);

        return SCI_FAILURE;
    }

    if (!sas_protocol_ata(task->task_proto)) {
        dev_err(&ireq->owning_controller->pdev->dev,
            "%s: Non-ATA protocol in SATA path: 0x%x\n",
            __func__,
            task->task_proto);
        return SCI_FAILURE;
    }

    /* ATAPI */
    if (dev->sata_dev.class == ATA_DEV_ATAPI &&
        task->ata_task.fis.command == ATA_CMD_PACKET) {
        sci_atapi_construct(ireq);
        return SCI_SUCCESS;
    }

    /* non data */
    if (task->data_dir == DMA_NONE) {
        scu_stp_raw_request_construct_task_context(ireq);
        return SCI_SUCCESS;
    }

    /* NCQ */
    if (task->ata_task.use_ncq) {
        sci_stp_optimized_request_construct(ireq,
                             SCU_TASK_TYPE_FPDMAQ_READ,
                             len, dir);
        return SCI_SUCCESS;
    }

    /* DMA */
    if (task->ata_task.dma_xfer) {
        sci_stp_optimized_request_construct(ireq,
                             SCU_TASK_TYPE_DMA_IN,
                             len, dir);
        return SCI_SUCCESS;
    } else /* PIO */
        return sci_stp_pio_request_construct(ireq, copy);

    return status;
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
    struct sas_task *task = isci_request_access_task(ireq);

    ireq->protocol = SAS_PROTOCOL_SSP;

    scu_ssp_io_request_construct_task_context(ireq,
                          task->data_dir,
                          task->total_xfer_len);

    sci_io_request_build_ssp_command_iu(ireq);

    sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

    return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
    struct isci_request *ireq)
{
    /* Construct the SSP Task SCU Task Context */
    scu_ssp_task_request_construct_task_context(ireq);

    /* Fill in the SSP Task IU */
    sci_task_request_build_ssp_task_iu(ireq);

    sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

    return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
    enum sci_status status;
    bool copy = false;
    struct sas_task *task = isci_request_access_task(ireq);

    ireq->protocol = SAS_PROTOCOL_STP;

    copy = (task->data_dir != DMA_NONE);

    status = sci_io_request_construct_sata(ireq,
                        task->total_xfer_len,
                        task->data_dir,
                        copy);

    if (status == SCI_SUCCESS)
        sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

    return status;
}

#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
    struct isci_host *ihost = ireq->owning_controller;
    u32 ret_val = 0;

    if (readl(&ihost->smu_registers->address_modifier) == 0) {
        void __iomem *scu_reg_base = ihost->scu_registers;

        /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
         *   BAR1 is the scu_registers
         *   0x20002C = 0x200000 + 0x2c
         *            = start of task context SRAM + offset of (type.ssp.data_offset)
         *   TCi is the io_tag of struct sci_request
         */
        ret_val = readl(scu_reg_base +
                (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
                ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
    }

    return ret_val;
}

enum sci_status sci_request_start(struct isci_request *ireq)
{
    enum sci_base_request_states state;
    struct scu_task_context *tc = ireq->tc;
    struct isci_host *ihost = ireq->owning_controller;

    state = ireq->sm.current_state_id;
    if (state != SCI_REQ_CONSTRUCTED) {
        dev_warn(&ihost->pdev->dev,
            "%s: SCIC IO Request requested to start while in wrong "
             "state %d\n", __func__, state);
        return SCI_FAILURE_INVALID_STATE;
    }

    tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

    switch (tc->protocol_type) {
    case SCU_TASK_CONTEXT_PROTOCOL_SMP:
    case SCU_TASK_CONTEXT_PROTOCOL_SSP:
        /* SSP/SMP Frame */
        tc->type.ssp.tag = ireq->io_tag;
        tc->type.ssp.target_port_transfer_tag = 0xFFFF;
        break;

    case SCU_TASK_CONTEXT_PROTOCOL_STP:
        /* STP/SATA Frame
         * tc->type.stp.ncq_tag = ireq->ncq_tag;
         */
        break;

    case SCU_TASK_CONTEXT_PROTOCOL_NONE:
        /* @todo When do we set no protocol type? */
        break;

    default:
        /* This should never happen since we build the IO
         * requests */
        break;
    }

    /* Add to the post_context the io tag value */
    ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

    /* Everything is good go ahead and change state */
    sci_change_state(&ireq->sm, SCI_REQ_STARTED);

    return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
    enum sci_base_request_states state;

    state = ireq->sm.current_state_id;

    switch (state) {
    case SCI_REQ_CONSTRUCTED:
        /* Set to make sure no HW terminate posting is done: */
        set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
        ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
        ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        return SCI_SUCCESS;
    case SCI_REQ_STARTED:
    case SCI_REQ_TASK_WAIT_TC_COMP:
    case SCI_REQ_SMP_WAIT_RESP:
    case SCI_REQ_SMP_WAIT_TC_COMP:
    case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
    case SCI_REQ_STP_UDMA_WAIT_D2H:
    case SCI_REQ_STP_NON_DATA_WAIT_H2D:
    case SCI_REQ_STP_NON_DATA_WAIT_D2H:
    case SCI_REQ_STP_PIO_WAIT_H2D:
    case SCI_REQ_STP_PIO_WAIT_FRAME:
    case SCI_REQ_STP_PIO_DATA_IN:
    case SCI_REQ_STP_PIO_DATA_OUT:
    case SCI_REQ_ATAPI_WAIT_H2D:
    case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
    case SCI_REQ_ATAPI_WAIT_D2H:
    case SCI_REQ_ATAPI_WAIT_TC_COMP:
        /* Fall through and change state to ABORTING... */
    case SCI_REQ_TASK_WAIT_TC_RESP:
        /* The task frame was already confirmed to have been
         * sent by the SCU HW.  Since the state machine is
         * now only waiting for the task response itself,
         * abort the request and complete it immediately
         * and don't wait for the task response.
         */
        sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
        fallthrough;    /* and handle like ABORTING */
    case SCI_REQ_ABORTING:
        if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
            set_bit(IREQ_PENDING_ABORT, &ireq->flags);
        else
            clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
        /* If the request is only waiting on the remote device
         * suspension, return SUCCESS so the caller will wait too.
         */
        return SCI_SUCCESS;
    case SCI_REQ_COMPLETED:
    default:
        dev_warn(&ireq->owning_controller->pdev->dev,
             "%s: SCIC IO Request requested to abort while in wrong "
             "state %d\n", __func__, ireq->sm.current_state_id);
        break;
    }

    return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
    enum sci_base_request_states state;
    struct isci_host *ihost = ireq->owning_controller;

    state = ireq->sm.current_state_id;
    if (WARN_ONCE(state != SCI_REQ_COMPLETED,
              "isci: request completion from wrong state (%s)\n",
              req_state_name(state)))
        return SCI_FAILURE_INVALID_STATE;

    if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
        sci_controller_release_frame(ihost,
                          ireq->saved_rx_frame_index);

    /* XXX can we just stop the machine and remove the 'final' state? */
    sci_change_state(&ireq->sm, SCI_REQ_FINAL);
    return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
                          u32 event_code)
{
    enum sci_base_request_states state;
    struct isci_host *ihost = ireq->owning_controller;

    state = ireq->sm.current_state_id;

    if (state != SCI_REQ_STP_PIO_DATA_IN) {
        dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
             __func__, event_code, req_state_name(state));

        return SCI_FAILURE_INVALID_STATE;
    }

    switch (scu_get_event_specifier(event_code)) {
    case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
        /* We are waiting for data and the SCU has R_ERR the data frame.
         * Go back to waiting for the D2H Register FIS
         */
        sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
        return SCI_SUCCESS;
    default:
        dev_err(&ihost->pdev->dev,
            "%s: pio request unexpected event %#x\n",
            __func__, event_code);

        /* TODO Should we fail the PIO request when we get an
         * unexpected event?
         */
        return SCI_FAILURE;
    }
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
    void *resp_buf;
    u32 len;
    struct ssp_response_iu *ssp_response;
    struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

    ssp_response = &ireq->ssp.rsp;

    resp_buf = &isci_tmf->resp.resp_iu;

    len = min_t(u32,
            SSP_RESP_IU_MAX_SIZE,
            be32_to_cpu(ssp_response->response_data_len));

    memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
                   u32 completion_code)
{
    struct ssp_response_iu *resp_iu;
    u8 datapres;

    /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
     * to determine SDMA status
     */
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
        ireq->scu_status = SCU_TASK_DONE_GOOD;
        ireq->sci_status = SCI_SUCCESS;
        break;
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
        /* There are times when the SCU hardware will return an early
         * response because the io request specified more data than is
         * returned by the target device (mode pages, inquiry data,
         * etc.).  We must check the response stats to see if this is
         * truly a failed request or a good request that just got
         * completed early.
         */
        struct ssp_response_iu *resp = &ireq->ssp.rsp;
        ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

        sci_swab32_cpy(&ireq->ssp.rsp,
                   &ireq->ssp.rsp,
                   word_cnt);

        if (resp->status == 0) {
            ireq->scu_status = SCU_TASK_DONE_GOOD;
            ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
        } else {
            ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
            ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
        }
        break;
    }
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
        ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

        sci_swab32_cpy(&ireq->ssp.rsp,
                   &ireq->ssp.rsp,
                   word_cnt);

        ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
        ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
        break;
    }

    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
        /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
         * guaranteed to be received before this completion status is
         * posted?
         */
        resp_iu = &ireq->ssp.rsp;
        datapres = resp_iu->datapres;

        if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
            datapres == SAS_DATAPRES_SENSE_DATA) {
            ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
            ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
        } else {
            ireq->scu_status = SCU_TASK_DONE_GOOD;
            ireq->sci_status = SCI_SUCCESS;
        }
        break;
    /* only stp device gets suspended. */
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
        if (ireq->protocol == SAS_PROTOCOL_STP) {
            ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                       SCU_COMPLETION_TL_STATUS_SHIFT;
            ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
        } else {
            ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                       SCU_COMPLETION_TL_STATUS_SHIFT;
            ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        }
        break;

    /* both stp/ssp device gets suspended */
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
        ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                   SCU_COMPLETION_TL_STATUS_SHIFT;
        ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
        break;

    /* neither ssp nor stp gets suspended. */
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
    default:
        ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
                   SCU_COMPLETION_TL_STATUS_SHIFT;
        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        break;
    }

    /*
     * TODO: This is probably wrong for ACK/NAK timeout conditions
     */

    /* In all cases we will treat this as the completion of the IO req. */
    sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
    return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
                u32 completion_code)
{
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
    case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
        ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
        ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;

    default:
        /* Unless we get some strange error, wait for the task abort to
         * complete.
         * TODO: Should there be a state change for this completion?
         */
        break;
    }

    return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
                               u32 completion_code)
{
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
        ireq->scu_status = SCU_TASK_DONE_GOOD;
        ireq->sci_status = SCI_SUCCESS;
        sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
        break;
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
        /* Currently, the decision is to simply allow the task request
         * to timeout if the task IU wasn't received successfully.
         * There is a potential for receiving multiple task responses if
         * we decide to send the task IU again.
         */
        dev_warn(&ireq->owning_controller->pdev->dev,
             "%s: TaskRequest:0x%p CompletionCode:%x - "
             "ACK/NAK timeout\n", __func__, ireq,
             completion_code);

        sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
        break;
    default:
        /*
         * All other completion status cause the IO to be complete.
         * If a NAK was received, then it is up to the user to retry
         * the request.
         */
        ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    }

    return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
                    u32 completion_code)
{
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
        /* In the AWAIT RESPONSE state, any TC completion is
         * unexpected.  But if the TC has success status, we
         * complete the IO anyway.
         */
        ireq->scu_status = SCU_TASK_DONE_GOOD;
        ireq->sci_status = SCI_SUCCESS;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
        /* These statuses have been seen on a specific LSI
         * expander, which sometimes is not able to send an smp
         * response within 2 ms. This causes our hardware to break
         * the connection and set TC completion with one of
         * these SMP_XXX_XX_ERR statuses. For this type of error,
         * we ask the ihost user to retry the request.
         */
        ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
        ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    default:
        /* All other completion status cause the IO to be complete.  If a NAK
         * was received, then it is up to the user to retry the request
         */
        ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    }

    return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
               u32 completion_code)
{
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
        ireq->scu_status = SCU_TASK_DONE_GOOD;
        ireq->sci_status = SCI_SUCCESS;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    default:
        /* All other completion status cause the IO to be
         * complete.  If a NAK was received, then it is up to
         * the user to retry the request.
         */
        ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    }

    return SCI_SUCCESS;
}

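/* Advance the PIO SGL cursor: move from element A to element B within the
 * current pair, then follow the next-pair link to the next pair's element A.
 * A zeroed address or next-pair link terminates the walk.
 */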
static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
    struct scu_sgl_element *sgl;
    struct scu_sgl_element_pair *sgl_pair;
    struct isci_request *ireq = to_ireq(stp_req);
    struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

    sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
    if (!sgl_pair)
        sgl = NULL;
    else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
        if (sgl_pair->B.address_lower == 0 &&
            sgl_pair->B.address_upper == 0) {
            sgl = NULL;
        } else {
            pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
            sgl = &sgl_pair->B;
        }
    } else {
        if (sgl_pair->next_pair_lower == 0 &&
            sgl_pair->next_pair_upper == 0) {
            sgl = NULL;
        } else {
            pio_sgl->index++;
            pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
            sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
            sgl = &sgl_pair->A;
        }
    }

    return sgl;
}

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
                    u32 completion_code)
{
    switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
    case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
        ireq->scu_status = SCU_TASK_DONE_GOOD;
        ireq->sci_status = SCI_SUCCESS;
        sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
        break;

    default:
        /* All other completion status cause the IO to be
         * complete.  If a NAK was received, then it is up to
         * the user to retry the request.
         */
        ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
        break;
    }

    return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit DATA_FIS from (current sgl + offset) for input
 * parameter length. current sgl and offset are already stored in the IO request
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
    struct isci_request *ireq,
    u32 length)
{
    struct isci_stp_request *stp_req = &ireq->stp.req;
    struct scu_task_context *task_context = ireq->tc;
    struct scu_sgl_element_pair *sgl_pair;
    struct scu_sgl_element *current_sgl;

    /* Recycle the TC and reconstruct it for sending out a DATA FIS
     * containing the data from current_sgl+offset for the input length
     */
    sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
    if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
        current_sgl = &sgl_pair->A;
    else
        current_sgl = &sgl_pair->B;

    /* update the TC */
    task_context->command_iu_upper = current_sgl->address_upper;
    task_context->command_iu_lower = current_sgl->address_lower;
    task_context->transfer_length_bytes = length;
    task_context->type.stp.fis_type = FIS_DATA;

    /* send the new TC out. */
    return sci_controller_continue_io(ireq);
}

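/* Transmit as much of the remaining PIO payload as the current SGL element
 * allows: consume whole elements while pio_len covers them, otherwise send a
 * partial element and save the adjusted offset for the next pass.
 */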
static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
    struct isci_stp_request *stp_req = &ireq->stp.req;
    struct scu_sgl_element_pair *sgl_pair;
    enum sci_status status = SCI_SUCCESS;
    struct scu_sgl_element *sgl;
    u32 offset;
    u32 len = 0;

    offset = stp_req->sgl.offset;
    sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
    if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
        return SCI_FAILURE;

    if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
        sgl = &sgl_pair->A;
        len = sgl_pair->A.length - offset;
    } else {
        sgl = &sgl_pair->B;
        len = sgl_pair->B.length - offset;
    }

    if (stp_req->pio_len == 0)
        return SCI_SUCCESS;

    if (stp_req->pio_len >= len) {
        status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
        if (status != SCI_SUCCESS)
            return status;
        stp_req->pio_len -= len;

        /* update the current sgl, offset and save for future */
        sgl = pio_sgl_next(stp_req);
        offset = 0;
    } else if (stp_req->pio_len < len) {
        sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

        /* Sgl offset will be adjusted and saved for future */
        offset += stp_req->pio_len;
        sgl->address_lower += stp_req->pio_len;
        stp_req->pio_len = 0;
    }

    stp_req->sgl.offset = offset;

    return status;
}

/**
 * sci_stp_request_pio_data_in_copy_data_buffer()
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request SGL
 * specified data region.
 *
 * Return: enum sci_status
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
                         u8 *data_buf, u32 len)
{
    struct isci_request *ireq;
    u8 *src_addr;
    int copy_len;
    struct sas_task *task;
    struct scatterlist *sg;
    void *kaddr;
    int total_len = len;

    ireq = to_ireq(stp_req);
    task = isci_request_access_task(ireq);
    src_addr = data_buf;

    if (task->num_scatter > 0) {
        sg = task->scatter;

        while (total_len > 0) {
            struct page *page = sg_page(sg);

            copy_len = min_t(int, total_len, sg_dma_len(sg));
            kaddr = kmap_atomic(page);
            memcpy(kaddr + sg->offset, src_addr, copy_len);
            kunmap_atomic(kaddr);
            total_len -= copy_len;
            src_addr += copy_len;
            sg = sg_next(sg);
        }
    } else {
        BUG_ON(task->total_xfer_len < total_len);
        memcpy(task->scatter, src_addr, total_len);
    }

    return SCI_SUCCESS;
}
1440 
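/*
 * Editor's note: the copy loop above maps each scatterlist page with
 * kmap_atomic()/kunmap_atomic(). On kernels that provide
 * kmap_local_page() it is the preferred replacement; a sketch of the
 * same per-element copy using it (illustrative only, same assumptions
 * as the function above):
 */
#if 0	/* illustrative only */
static void copy_buf_to_sg(struct scatterlist *sg, const u8 *src, int total)
{
	while (total > 0 && sg) {
		int n = min_t(int, total, sg_dma_len(sg));
		void *kaddr = kmap_local_page(sg_page(sg));

		memcpy(kaddr + sg->offset, src, n);
		kunmap_local(kaddr);
		src += n;
		total -= n;
		sg = sg_next(sg);
	}
}
#endif
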
1441 /**
1442  * sci_stp_request_pio_data_in_copy_data()
1443  * @stp_req: The PIO DATA IN request that is to receive the data.
1444  * @data_buffer: The buffer to copy from.
1445  *
1446  * Copy the data buffer into the I/O request's data region. Returns an enum sci_status.
1447  */
1448 static enum sci_status sci_stp_request_pio_data_in_copy_data(
1449     struct isci_stp_request *stp_req,
1450     u8 *data_buffer)
1451 {
1452     enum sci_status status;
1453 
1454     /*
1455      * If there is less than a full frame (SCU_MAX_FRAME_BUFFER_SIZE)
1456      * remaining in the transfer request, copy just that data. */
1457     if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1458         status = sci_stp_request_pio_data_in_copy_data_buffer(
1459             stp_req, data_buffer, stp_req->pio_len);
1460 
1461         if (status == SCI_SUCCESS)
1462             stp_req->pio_len = 0;
1463     } else {
1464         /* We are transferring a whole frame, so copy it */
1465         status = sci_stp_request_pio_data_in_copy_data_buffer(
1466             stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1467 
1468         if (status == SCI_SUCCESS)
1469             stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1470     }
1471 
1472     return status;
1473 }
1474 
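/*
 * Editor's worked example for the split above, assuming
 * SCU_MAX_FRAME_BUFFER_SIZE == 1024 (the "1K" of the comment): with
 * pio_len == 1536, the first frame takes the full-frame branch
 * (copy 1024, pio_len becomes 512) and the next frame takes the
 * partial branch (copy 512, pio_len becomes 0).
 */
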
1475 static enum sci_status
1476 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1477                           u32 completion_code)
1478 {
1479     switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1480     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1481         ireq->scu_status = SCU_TASK_DONE_GOOD;
1482         ireq->sci_status = SCI_SUCCESS;
1483         sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1484         break;
1485 
1486     default:
1487         /* All other completion statuses cause the IO to be
1488          * complete.  If a NAK was received, then it is up to
1489          * the user to retry the request.
1490          */
1491         ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1492         ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1493         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1494         break;
1495     }
1496 
1497     return SCI_SUCCESS;
1498 }
1499 
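/*
 * Editor's note: the case labels in these TC-event handlers compare the
 * transport-layer status field of the completion code. Judging from the
 * open-coded labels later in this file, SCU_MAKE_COMPLETION_STATUS(x)
 * appears equivalent to (x << SCU_COMPLETION_TL_STATUS_SHIFT), so a
 * decode helper would reduce to a shift-and-compare (illustrative only):
 */
#if 0	/* illustrative only */
static bool tc_status_is(u32 completion_code, u32 task_done)
{
	return SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
	       (task_done << SCU_COMPLETION_TL_STATUS_SHIFT);
}
#endif
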
1500 static enum sci_status
1501 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1502                   u32 completion_code)
1503 {
1504     enum sci_status status = SCI_SUCCESS;
1505     bool all_frames_transferred = false;
1506     struct isci_stp_request *stp_req = &ireq->stp.req;
1507 
1508     switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1509     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1510         /* Transmit data */
1511         if (stp_req->pio_len != 0) {
1512             status = sci_stp_request_pio_data_out_transmit_data(ireq);
1513             if (status == SCI_SUCCESS) {
1514                 if (stp_req->pio_len == 0)
1515                     all_frames_transferred = true;
1516             }
1517         } else if (stp_req->pio_len == 0) {
1518             /*
1519              * This happens when all of the data was written on the
1520              * first pass after the PIO Setup FIS was received.
1521              */
1522                 all_frames_transferred = true;
1523         }
1524 
1525         /* all data transferred. */
1526         if (all_frames_transferred) {
1527             /*
1528              * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1529              * and wait for a PIO Setup FIS or a D2H Register FIS. */
1530             sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1531         }
1532         break;
1533 
1534     default:
1535         /*
1536          * All other completion statuses cause the IO to be complete.
1537          * If a NAK was received, then it is up to the user to retry
1538          * the request.
1539          */
1540         ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1541         ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1542         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1543         break;
1544     }
1545 
1546     return status;
1547 }
1548 
1549 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1550                                        u32 frame_index)
1551 {
1552     struct isci_host *ihost = ireq->owning_controller;
1553     struct dev_to_host_fis *frame_header;
1554     enum sci_status status;
1555     u32 *frame_buffer;
1556 
1557     status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1558                                    frame_index,
1559                                    (void **)&frame_header);
1560 
1561     if ((status == SCI_SUCCESS) &&
1562         (frame_header->fis_type == FIS_REGD2H)) {
1563         sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1564                                   frame_index,
1565                                   (void **)&frame_buffer);
1566 
1567         sci_controller_copy_sata_response(&ireq->stp.rsp,
1568                                frame_header,
1569                                frame_buffer);
1570     }
1571 
1572     sci_controller_release_frame(ihost, frame_index);
1573 
1574     return status;
1575 }
1576 
1577 static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
1578                            u32 frame_index)
1579 {
1580     struct isci_host *ihost = ireq->owning_controller;
1581     enum sci_status status;
1582     struct dev_to_host_fis *frame_header;
1583     u32 *frame_buffer;
1584 
1585     status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1586                               frame_index,
1587                               (void **)&frame_header);
1588 
1589     if (status != SCI_SUCCESS)
1590         return status;
1591 
1592     if (frame_header->fis_type != FIS_REGD2H) {
1593         dev_err(&ireq->isci_host->pdev->dev,
1594             "%s ERROR: invalid fis type 0x%X\n",
1595             __func__, frame_header->fis_type);
1596         return SCI_FAILURE;
1597     }
1598 
1599     sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1600                          frame_index,
1601                          (void **)&frame_buffer);
1602 
1603     sci_controller_copy_sata_response(&ireq->stp.rsp,
1604                       (u32 *)frame_header,
1605                       frame_buffer);
1606 
1607     /* Frame has been decoded; return it to the controller */
1608     sci_controller_release_frame(ihost, frame_index);
1609 
1610     return status;
1611 }
1612 
1613 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
1614                            u32 frame_index)
1615 {
1616     struct sas_task *task = isci_request_access_task(ireq);
1617     enum sci_status status;
1618 
1619     status = process_unsolicited_fis(ireq, frame_index);
1620 
1621     if (status == SCI_SUCCESS) {
1622         if (ireq->stp.rsp.status & ATA_ERR)
1623             status = SCI_FAILURE_IO_RESPONSE_VALID;
1624     } else {
1625         status = SCI_FAILURE_IO_RESPONSE_VALID;
1626     }
1627 
1628     if (status != SCI_SUCCESS) {
1629         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1630         ireq->sci_status = status;
1631     } else {
1632         ireq->scu_status = SCU_TASK_DONE_GOOD;
1633         ireq->sci_status = SCI_SUCCESS;
1634     }
1635 
1636     /* the D2H UF is the end of non-data commands */
1637     if (task->data_dir == DMA_NONE)
1638         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1639 
1640     return status;
1641 }
1642 
1643 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
1644 {
1645     struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1646     void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
1647     struct scu_task_context *task_context = ireq->tc;
1648 
1649     /* Fill in the SCU Task Context for a DATA FIS containing the CDB in Raw
1650      * Frame type. The TC for the previous Packet FIS is already there; we only
1651      * need to change the H2D FIS content.
1652      */
1653     memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
1654     memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
1655     memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
1656     task_context->type.stp.fis_type = FIS_DATA;
1657     task_context->transfer_length_bytes = dev->cdb_len;
1658 }
1659 
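/*
 * Editor's note on the memcpy above (inferred from the offsets used):
 * the ATAPI CDB is copied one u32 past the start of ireq->stp.cmd, so
 * the raw DATA frame payload is laid out as <FIS dword 0><CDB bytes>,
 * i.e. the CDB immediately follows the dword holding the FIS type.
 */
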
1660 static void scu_atapi_construct_task_context(struct isci_request *ireq)
1661 {
1662     struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1663     struct sas_task *task = isci_request_access_task(ireq);
1664     struct scu_task_context *task_context = ireq->tc;
1665     int cdb_len = dev->cdb_len;
1666 
1667     /* reference: SSTL 1.13.4.2
1668      * task_type, sata_direction
1669      */
1670     if (task->data_dir == DMA_TO_DEVICE) {
1671         task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
1672         task_context->sata_direction = 0;
1673     } else {
1674         /* todo: for a NO_DATA command, we need to send out a raw frame. */
1675         task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
1676         task_context->sata_direction = 1;
1677     }
1678 
1679     memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
1680     task_context->type.stp.fis_type = FIS_DATA;
1681 
1682     memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
1683     memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
1684     task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
1685 
1686     /* task phase is set to TX_CMD */
1687     task_context->task_phase = 0x1;
1688 
1689     /* retry counter */
1690     task_context->stp_retry_count = 0;
1691 
1692     /* data transfer size. */
1693     task_context->transfer_length_bytes = task->total_xfer_len;
1694 
1695     /* setup sgl */
1696     sci_request_build_sgl(ireq);
1697 }
1698 
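/*
 * Editor's worked example: for a standard 12-byte ATAPI packet,
 * ssp_command_iu_length = 12 / sizeof(u32) = 3 dwords.
 */
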
1699 enum sci_status
1700 sci_io_request_frame_handler(struct isci_request *ireq,
1701                   u32 frame_index)
1702 {
1703     struct isci_host *ihost = ireq->owning_controller;
1704     struct isci_stp_request *stp_req = &ireq->stp.req;
1705     enum sci_base_request_states state;
1706     enum sci_status status;
1707     ssize_t word_cnt;
1708 
1709     state = ireq->sm.current_state_id;
1710     switch (state)  {
1711     case SCI_REQ_STARTED: {
1712         struct ssp_frame_hdr ssp_hdr;
1713         void *frame_header;
1714 
1715         sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1716                                   frame_index,
1717                                   &frame_header);
1718 
1719         word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1720         sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1721 
1722         if (ssp_hdr.frame_type == SSP_RESPONSE) {
1723             struct ssp_response_iu *resp_iu;
1724             ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1725 
1726             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1727                                       frame_index,
1728                                       (void **)&resp_iu);
1729 
1730             sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1731 
1732             resp_iu = &ireq->ssp.rsp;
1733 
1734             if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA ||
1735                 resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) {
1736                 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1737                 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1738             } else {
1739                 ireq->scu_status = SCU_TASK_DONE_GOOD;
1740                 ireq->sci_status = SCI_SUCCESS;
1741             }
1742         } else {
1743             /* not a response frame, why did it get forwarded? */
1744             dev_err(&ihost->pdev->dev,
1745                 "%s: SCIC IO Request 0x%p received unexpected "
1746                 "frame %d type 0x%02x\n", __func__, ireq,
1747                 frame_index, ssp_hdr.frame_type);
1748         }
1749 
1750         /*
1751          * In any case we are done with this frame buffer; return it to
1752          * the controller.
1753          */
1754         sci_controller_release_frame(ihost, frame_index);
1755 
1756         return SCI_SUCCESS;
1757     }
1758 
1759     case SCI_REQ_TASK_WAIT_TC_RESP:
1760         sci_io_request_copy_response(ireq);
1761         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1762         sci_controller_release_frame(ihost, frame_index);
1763         return SCI_SUCCESS;
1764 
1765     case SCI_REQ_SMP_WAIT_RESP: {
1766         struct sas_task *task = isci_request_access_task(ireq);
1767         struct scatterlist *sg = &task->smp_task.smp_resp;
1768         void *frame_header, *kaddr;
1769         u8 *rsp;
1770 
1771         sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1772                              frame_index,
1773                              &frame_header);
1774         kaddr = kmap_atomic(sg_page(sg));
1775         rsp = kaddr + sg->offset;
1776         sci_swab32_cpy(rsp, frame_header, 1);
1777 
1778         if (rsp[0] == SMP_RESPONSE) {
1779             void *smp_resp;
1780 
1781             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1782                                  frame_index,
1783                                  &smp_resp);
1784 
1785             word_cnt = (sg->length/4)-1;
1786             if (word_cnt > 0)
1787                 word_cnt = min_t(unsigned int, word_cnt,
1788                          SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1789             sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1790 
1791             ireq->scu_status = SCU_TASK_DONE_GOOD;
1792             ireq->sci_status = SCI_SUCCESS;
1793             sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1794         } else {
1795             /*
1796              * This was not a response frame; why did it get
1797              * forwarded?
1798              */
1799             dev_err(&ihost->pdev->dev,
1800                 "%s: SCIC SMP Request 0x%p received unexpected "
1801                 "frame %d type 0x%02x\n",
1802                 __func__,
1803                 ireq,
1804                 frame_index,
1805                 rsp[0]);
1806 
1807             ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1808             ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1809             sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1810         }
1811         kunmap_atomic(kaddr);
1812 
1813         sci_controller_release_frame(ihost, frame_index);
1814 
1815         return SCI_SUCCESS;
1816     }
1817 
1818     case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1819         return sci_stp_request_udma_general_frame_handler(ireq,
1820                                        frame_index);
1821 
1822     case SCI_REQ_STP_UDMA_WAIT_D2H:
1823         /* Use the general frame handler to copy the response data */
1824         status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1825 
1826         if (status != SCI_SUCCESS)
1827             return status;
1828 
1829         ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1830         ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1831         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1832         return SCI_SUCCESS;
1833 
1834     case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1835         struct dev_to_host_fis *frame_header;
1836         u32 *frame_buffer;
1837 
1838         status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1839                                        frame_index,
1840                                        (void **)&frame_header);
1841 
1842         if (status != SCI_SUCCESS) {
1843             dev_err(&ihost->pdev->dev,
1844                 "%s: SCIC IO Request 0x%p could not get frame "
1845                 "header for frame index %d, status %x\n",
1846                 __func__,
1847                 stp_req,
1848                 frame_index,
1849                 status);
1850 
1851             return status;
1852         }
1853 
1854         switch (frame_header->fis_type) {
1855         case FIS_REGD2H:
1856             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1857                                       frame_index,
1858                                       (void **)&frame_buffer);
1859 
1860             sci_controller_copy_sata_response(&ireq->stp.rsp,
1861                                    frame_header,
1862                                    frame_buffer);
1863 
1864             /* The command has completed with an error */
1865             ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1866             ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1867             break;
1868 
1869         default:
1870             dev_warn(&ihost->pdev->dev,
1871                  "%s: IO Request:0x%p Frame Id:%d protocol "
1872                   "violation occurred\n", __func__, stp_req,
1873                   frame_index);
1874 
1875             ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1876             ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1877             break;
1878         }
1879 
1880         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1881 
1882         /* Frame has been decoded; return it to the controller */
1883         sci_controller_release_frame(ihost, frame_index);
1884 
1885         return status;
1886     }
1887 
1888     case SCI_REQ_STP_PIO_WAIT_FRAME: {
1889         struct sas_task *task = isci_request_access_task(ireq);
1890         struct dev_to_host_fis *frame_header;
1891         u32 *frame_buffer;
1892 
1893         status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1894                                        frame_index,
1895                                        (void **)&frame_header);
1896 
1897         if (status != SCI_SUCCESS) {
1898             dev_err(&ihost->pdev->dev,
1899                 "%s: SCIC IO Request 0x%p could not get frame "
1900                 "header for frame index %d, status %x\n",
1901                 __func__, stp_req, frame_index, status);
1902             return status;
1903         }
1904 
1905         switch (frame_header->fis_type) {
1906         case FIS_PIO_SETUP:
1907             /* Get from the frame buffer the PIO Setup Data */
1908             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1909                                       frame_index,
1910                                       (void **)&frame_buffer);
1911 
1912             /* Get the data from the PIO Setup. The SCU hardware
1913              * returns the first word in the frame_header, and the
1914              * rest of the data is in the frame buffer, so we need
1915              * to back up one dword.
1916              */
1917 
1918             /* transfer_count: the first 16 bits of the 4th dword */
1919             stp_req->pio_len = frame_buffer[3] & 0xffff;
1920 
1921             /* status: 4th byte in the 3rd dword */
1922             stp_req->status = (frame_buffer[2] >> 24) & 0xff;
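            /*
             * Editor's worked example (values illustrative): if
             * frame_buffer[3] == 0x12340200 then pio_len = 0x0200
             * (512 bytes), and if frame_buffer[2] == 0x50xxxxxx
             * then status = 0x50 (DRDY | DSC).
             */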
1923 
1924             sci_controller_copy_sata_response(&ireq->stp.rsp,
1925                                    frame_header,
1926                                    frame_buffer);
1927 
1928             ireq->stp.rsp.status = stp_req->status;
1929 
1930             /* The next state is dependent on whether the
1931              * request was PIO Data-in or Data-out.
1932              */
1933             if (task->data_dir == DMA_FROM_DEVICE) {
1934                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1935             } else if (task->data_dir == DMA_TO_DEVICE) {
1936                 /* Transmit data */
1937                 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1938                 if (status != SCI_SUCCESS)
1939                     break;
1940                 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1941             }
1942             break;
1943 
1944         case FIS_SETDEVBITS:
1945             sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1946             break;
1947 
1948         case FIS_REGD2H:
1949             if (frame_header->status & ATA_BUSY) {
1950                 /*
1951                  * Now why is the drive sending a D2H Register
1952                  * FIS when it is still busy?  Do nothing since
1953                  * we are still in the right state.
1954                  */
1955                 dev_dbg(&ihost->pdev->dev,
1956                     "%s: SCIC PIO Request 0x%p received "
1957                     "D2H Register FIS with BSY status "
1958                     "0x%x\n",
1959                     __func__,
1960                     stp_req,
1961                     frame_header->status);
1962                 break;
1963             }
1964 
1965             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1966                                       frame_index,
1967                                       (void **)&frame_buffer);
1968 
1969             sci_controller_copy_sata_response(&ireq->stp.rsp,
1970                                    frame_header,
1971                                    frame_buffer);
1972 
1973             ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1974             ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1975             sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1976             break;
1977 
1978         default:
1979             /* FIXME: what do we do here? */
1980             break;
1981         }
1982 
1983         /* Frame is decoded; return it to the controller */
1984         sci_controller_release_frame(ihost, frame_index);
1985 
1986         return status;
1987     }
1988 
1989     case SCI_REQ_STP_PIO_DATA_IN: {
1990         struct dev_to_host_fis *frame_header;
1991         struct sata_fis_data *frame_buffer;
1992 
1993         status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1994                                        frame_index,
1995                                        (void **)&frame_header);
1996 
1997         if (status != SCI_SUCCESS) {
1998             dev_err(&ihost->pdev->dev,
1999                 "%s: SCIC IO Request 0x%p could not get frame "
2000                 "header for frame index %d, status %x\n",
2001                 __func__,
2002                 stp_req,
2003                 frame_index,
2004                 status);
2005             return status;
2006         }
2007 
2008         if (frame_header->fis_type != FIS_DATA) {
2009             dev_err(&ihost->pdev->dev,
2010                 "%s: SCIC PIO Request 0x%p received frame %d "
2011                 "with fis type 0x%02x when expecting a data "
2012                 "fis.\n",
2013                 __func__,
2014                 stp_req,
2015                 frame_index,
2016                 frame_header->fis_type);
2017 
2018             ireq->scu_status = SCU_TASK_DONE_GOOD;
2019             ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
2020             sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2021 
2022             /* Frame is decoded; return it to the controller */
2023             sci_controller_release_frame(ihost, frame_index);
2024             return status;
2025         }
2026 
2027         if (stp_req->sgl.index < 0) {
2028             ireq->saved_rx_frame_index = frame_index;
2029             stp_req->pio_len = 0;
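            /*
             * Editor's note: unlike the else-branch below, this branch
             * deliberately does not release the frame; its index is
             * parked in saved_rx_frame_index so the frame can be
             * consumed (and released) later (inferred from the
             * asymmetry between the two branches).
             */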
2030         } else {
2031             sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
2032                                       frame_index,
2033                                       (void **)&frame_buffer);
2034 
2035             status = sci_stp_request_pio_data_in_copy_data(stp_req,
2036                                         (u8 *)frame_buffer);
2037 
2038             /* Frame is decoded; return it to the controller */
2039             sci_controller_release_frame(ihost, frame_index);
2040         }
2041 
2042         /* Check for the end of the transfer: are there more
2043          * bytes remaining for this data transfer?
2044          */
2045         if (status != SCI_SUCCESS || stp_req->pio_len != 0)
2046             return status;
2047 
2048         if ((stp_req->status & ATA_BUSY) == 0) {
2049             ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2050             ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2051             sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2052         } else {
2053             sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
2054         }
2055         return status;
2056     }
2057 
2058     case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
2059         struct sas_task *task = isci_request_access_task(ireq);
2060 
2061         sci_controller_release_frame(ihost, frame_index);
2062         ireq->target_device->working_request = ireq;
2063         if (task->data_dir == DMA_NONE) {
2064             sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2065             scu_atapi_reconstruct_raw_frame_task_context(ireq);
2066         } else {
2067             sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2068             scu_atapi_construct_task_context(ireq);
2069         }
2070 
2071         sci_controller_continue_io(ireq);
2072         return SCI_SUCCESS;
2073     }
2074     case SCI_REQ_ATAPI_WAIT_D2H:
2075         return atapi_d2h_reg_frame_handler(ireq, frame_index);
2076     case SCI_REQ_ABORTING:
2077         /*
2078          * TODO: Is it even possible to get an unsolicited frame in the
2079          * aborting state?
2080          */
2081         sci_controller_release_frame(ihost, frame_index);
2082         return SCI_SUCCESS;
2083 
2084     default:
2085         dev_warn(&ihost->pdev->dev,
2086              "%s: SCIC IO Request given unexpected frame %x while "
2087              "in state %d\n",
2088              __func__,
2089              frame_index,
2090              state);
2091 
2092         sci_controller_release_frame(ihost, frame_index);
2093         return SCI_FAILURE_INVALID_STATE;
2094     }
2095 }
2096 
2097 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2098                                u32 completion_code)
2099 {
2100     switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2101     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2102         ireq->scu_status = SCU_TASK_DONE_GOOD;
2103         ireq->sci_status = SCI_SUCCESS;
2104         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2105         break;
2106     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2107     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2108         /* We must check the response buffer to see if the D2H
2109          * Register FIS was received before we got the TC
2110          * completion.
2111          */
2112         if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2113             sci_remote_device_suspend(ireq->target_device,
2114                           SCI_SW_SUSPEND_NORMAL);
2115 
2116             ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2117             ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2118             sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2119         } else {
2120             /* If we have an error completion status for the
2121              * TC then we can expect a D2H register FIS from
2122              * the device, so we must change state to wait
2123              * for it.
2124              */
2125             sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2126         }
2127         break;
2128 
2129     /* TODO Check to see if any of these completion statuses need to
2130      * wait for the device-to-host register FIS.
2131      */
2132     /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2133      * - this occurs only for B0
2134      */
2135     default:
2136         /* All other completion statuses cause the IO to be complete. */
2137         ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2138         ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2139         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2140         break;
2141     }
2142 
2143     return SCI_SUCCESS;
2144 }
2145 
2146 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2147                           enum sci_base_request_states next)
2148 {
2149     switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2150     case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2151         ireq->scu_status = SCU_TASK_DONE_GOOD;
2152         ireq->sci_status = SCI_SUCCESS;
2153         sci_change_state(&ireq->sm, next);
2154         break;
2155     default:
2156         /* All other completion statuses cause the IO to be complete.
2157          * If a NAK was received, then it is up to the user to retry
2158          * the request.
2159          */
2160         ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2161         ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2162 
2163         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2164         break;
2165     }
2166 
2167     return SCI_SUCCESS;
2168 }
2169 
2170 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2171                             u32 completion_code)
2172 {
2173     struct isci_remote_device *idev = ireq->target_device;
2174     struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2175     enum sci_status status = SCI_SUCCESS;
2176 
2177     switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2178     case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2179         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2180         break;
2181 
2182     case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2183         u16 len = sci_req_tx_bytes(ireq);
2184 
2185         /* likely a non-error data underrun; work around the missing
2186          * D2H frame from the controller
2187          */
2188         if (d2h->fis_type != FIS_REGD2H) {
2189             d2h->fis_type = FIS_REGD2H;
2190             d2h->flags = (1 << 6);
2191             d2h->status = 0x50;
2192             d2h->error = 0;
2193             d2h->lbal = 0;
2194             d2h->byte_count_low = len & 0xff;
2195             d2h->byte_count_high = len >> 8;
2196             d2h->device = 0xa0;
2197             d2h->lbal_exp = 0;
2198             d2h->lbam_exp = 0;
2199             d2h->lbah_exp = 0;
2200             d2h->_r_a = 0;
2201             d2h->sector_count = 0x3;
2202             d2h->sector_count_exp = 0;
2203             d2h->_r_b = 0;
2204             d2h->_r_c = 0;
2205             d2h->_r_d = 0;
2206         }
2207 
2208         ireq->scu_status = SCU_TASK_DONE_GOOD;
2209         ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2210         status = ireq->sci_status;
2211 
2212         /* the hw will have suspended the rnc, so complete the
2213          * request upon pending resume
2214          */
2215         sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2216         break;
2217     }
2218     case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2219         /* In this case, there is no UF coming after;
2220          * complete the IO now.
2221          */
2222         ireq->scu_status = SCU_TASK_DONE_GOOD;
2223         ireq->sci_status = SCI_SUCCESS;
2224         sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2225         break;
2226 
2227     default:
2228         if (d2h->fis_type == FIS_REGD2H) {
2229             /* UF received; change the device state to ATAPI_ERROR */
2230             status = ireq->sci_status;
2231             sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2232         } else {
2233             /* If we receive a non-success TC status and no UF
2234              * has been received yet, then a UF for the status FIS
2235              * is coming after (XXX: suspect this is
2236              * actually a protocol error or a bug like the
2237              * DONE_UNEXP_FIS case)
2238              */
2239             ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2240             ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2241 
2242             sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2243         }
2244         break;
2245     }
2246 
2247     return status;
2248 }
2249 
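/*
 * Editor's worked example for the synthesized D2H FIS above: with
 * len == 0x1234 bytes transmitted, byte_count_low = 0x34 and
 * byte_count_high = 0x12. Status 0x50 is DRDY | DSC with BSY and ERR
 * clear, and sector_count 0x3 sets the ATAPI interrupt-reason C/D and
 * I/O bits, i.e. "command complete" (per the SATA D2H register FIS
 * and ATAPI interrupt-reason conventions; noted here for illustration).
 */
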
2250 static int sci_request_smp_completion_status_is_tx_suspend(
2251     unsigned int completion_status)
2252 {
2253     switch (completion_status) {
2254     case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2255     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2256     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2257     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2258     case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2259     case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2260         return 1;
2261     }
2262     return 0;
2263 }
2264 
2265 static int sci_request_smp_completion_status_is_tx_rx_suspend(
2266     unsigned int completion_status)
2267 {
2268     return 0; /* There are no Tx/Rx SMP suspend conditions. */
2269 }
2270 
2271 static int sci_request_ssp_completion_status_is_tx_suspend(
2272     unsigned int completion_status)
2273 {
2274     switch (completion_status) {
2275     case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2276     case SCU_TASK_DONE_LF_ERR:
2277     case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2278     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2279     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2280     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2281     case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2282     case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2283     case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2284     case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2285     case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2286         return 1;
2287     }
2288     return 0;
2289 }
2290 
2291 static int sci_request_ssp_completion_status_is_tx_rx_suspend(
2292     unsigned int completion_status)
2293 {
2294     return 0; /* There are no Tx/Rx SSP suspend conditions. */
2295 }
2296 
2297 static int sci_request_stpsata_completion_status_is_tx_suspend(
2298     unsigned int completion_status)
2299 {
2300     switch (completion_status) {
2301     case SCU_TASK_DONE_TX_RAW_CMD_ERR:
2302     case SCU_TASK_DONE_LL_R_ERR:
2303     case SCU_TASK_DONE_LL_PERR:
2304     case SCU_TASK_DONE_REG_ERR:
2305     case SCU_TASK_DONE_SDB_ERR:
2306     case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2307     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2308     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2309     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2310     case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2311     case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2312     case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2313     case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2314     case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2315         return 1;
2316     }
2317     return 0;
2318 }
2319 
2320 
2321 static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
2322     unsigned int completion_status)
2323 {
2324     switch (completion_status) {
2325     case SCU_TASK_DONE_LF_ERR:
2326     case SCU_TASK_DONE_LL_SY_TERM:
2327     case SCU_TASK_DONE_LL_LF_TERM:
2328     case SCU_TASK_DONE_BREAK_RCVD:
2329     case SCU_TASK_DONE_INV_FIS_LEN:
2330     case SCU_TASK_DONE_UNEXP_FIS:
2331     case SCU_TASK_DONE_UNEXP_SDBFIS:
2332     case SCU_TASK_DONE_MAX_PLD_ERR:
2333         return 1;
2334     }
2335     return 0;
2336 }
2337 
2338 static void sci_request_handle_suspending_completions(
2339     struct isci_request *ireq,
2340     u32 completion_code)
2341 {
2342     int is_tx = 0;
2343     int is_tx_rx = 0;
2344 
2345     switch (ireq->protocol) {
2346     case SAS_PROTOCOL_SMP:
2347         is_tx = sci_request_smp_completion_status_is_tx_suspend(
2348             completion_code);
2349         is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
2350             completion_code);
2351         break;
2352     case SAS_PROTOCOL_SSP:
2353         is_tx = sci_request_ssp_completion_status_is_tx_suspend(
2354             completion_code);
2355         is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
2356             completion_code);
2357         break;
2358     case SAS_PROTOCOL_STP:
2359         is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
2360             completion_code);
2361         is_tx_rx =
2362             sci_request_stpsata_completion_status_is_tx_rx_suspend(
2363                 completion_code);
2364         break;
2365     default:
2366         dev_warn(&ireq->isci_host->pdev->dev,
2367              "%s: request %p has no valid protocol\n",
2368              __func__, ireq);
2369         break;
2370     }
2371     if (is_tx || is_tx_rx) {
2372         BUG_ON(is_tx && is_tx_rx);
2373 
2374         sci_remote_node_context_suspend(
2375             &ireq->target_device->rnc,
2376             SCI_HW_SUSPEND,
2377             (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
2378                    : SCU_EVENT_TL_RNC_SUSPEND_TX);
2379     }
2380 }
2381 
2382 enum sci_status
2383 sci_io_request_tc_completion(struct isci_request *ireq,
2384                  u32 completion_code)
2385 {
2386     enum sci_base_request_states state;
2387     struct isci_host *ihost = ireq->owning_controller;
2388 
2389     state = ireq->sm.current_state_id;
2390 
2391     /* Decode those completions that signal upcoming suspension events. */
2392     sci_request_handle_suspending_completions(
2393         ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
2394 
2395     switch (state) {
2396     case SCI_REQ_STARTED:
2397         return request_started_state_tc_event(ireq, completion_code);
2398 
2399     case SCI_REQ_TASK_WAIT_TC_COMP:
2400         return ssp_task_request_await_tc_event(ireq,
2401                                completion_code);
2402 
2403     case SCI_REQ_SMP_WAIT_RESP:
2404         return smp_request_await_response_tc_event(ireq,
2405                                completion_code);
2406 
2407     case SCI_REQ_SMP_WAIT_TC_COMP:
2408         return smp_request_await_tc_event(ireq, completion_code);
2409 
2410     case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2411         return stp_request_udma_await_tc_event(ireq,
2412                                completion_code);
2413 
2414     case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2415         return stp_request_non_data_await_h2d_tc_event(ireq,
2416                                    completion_code);
2417 
2418     case SCI_REQ_STP_PIO_WAIT_H2D:
2419         return stp_request_pio_await_h2d_completion_tc_event(ireq,
2420                                      completion_code);
2421 
2422     case SCI_REQ_STP_PIO_DATA_OUT:
2423         return pio_data_out_tx_done_tc_event(ireq, completion_code);
2424 
2425     case SCI_REQ_ABORTING:
2426         return request_aborting_state_tc_event(ireq,
2427                                completion_code);
2428 
2429     case SCI_REQ_ATAPI_WAIT_H2D:
2430         return atapi_raw_completion(ireq, completion_code,
2431                         SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2432 
2433     case SCI_REQ_ATAPI_WAIT_TC_COMP:
2434         return atapi_raw_completion(ireq, completion_code,
2435                         SCI_REQ_ATAPI_WAIT_D2H);
2436 
2437     case SCI_REQ_ATAPI_WAIT_D2H:
2438         return atapi_data_tc_completion_handler(ireq, completion_code);
2439 
2440     default:
2441         dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2442              __func__, completion_code, req_state_name(state));
2443         return SCI_FAILURE_INVALID_STATE;
2444     }
2445 }
2446 
2447 /**
2448  * isci_request_process_response_iu() - This function sets the status and
2449  *    response iu, in the task struct, from the request object for the upper
2450  *    layer driver.
2451  * @task: This parameter is the task struct from the upper layer driver.
2452  * @resp_iu: This parameter points to the response iu of the completed request.
2453  * @dev: This parameter specifies the linux device struct.
2454  *
2455  * Return: none.
2456  */
2457 static void isci_request_process_response_iu(
2458     struct sas_task *task,
2459     struct ssp_response_iu *resp_iu,
2460     struct device *dev)
2461 {
2462     dev_dbg(dev,
2463         "%s: resp_iu = %p "
2464         "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2465         "resp_iu->response_data_len = %x, "
2466         "resp_iu->sense_data_len = %x\nresponse data: ",
2467         __func__,
2468         resp_iu,
2469         resp_iu->status,
2470         resp_iu->datapres,
2471         resp_iu->response_data_len,
2472         resp_iu->sense_data_len);
2473 
2474     task->task_status.stat = resp_iu->status;
2475 
2476     /* libsas updates the task status fields based on the response iu. */
2477     sas_ssp_task_response(dev, task, resp_iu);
2478 }
2479 
2480 /**
2481  * isci_request_set_open_reject_status() - This function prepares the I/O
2482  *    completion for OPEN_REJECT conditions.
2483  * @request: This parameter is the completed isci_request object.
2484  * @task: This parameter is the task struct from the upper layer driver.
2485  * @response_ptr: This parameter specifies the service response for the I/O.
2486  * @status_ptr: This parameter specifies the exec status for the I/O.
2487  * @open_rej_reason: This parameter specifies the encoded reason for the
2488  *    abandon-class reject.
2489  *
2490  * Return: none.
2491  */
2492 static void isci_request_set_open_reject_status(
2493     struct isci_request *request,
2494     struct sas_task *task,
2495     enum service_response *response_ptr,
2496     enum exec_status *status_ptr,
2497     enum sas_open_rej_reason open_rej_reason)
2498 {
2499     /* Task in the target is done. */
2500     set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2501     *response_ptr                     = SAS_TASK_UNDELIVERED;
2502     *status_ptr                       = SAS_OPEN_REJECT;
2503     task->task_status.open_rej_reason = open_rej_reason;
2504 }
2505 
2506 /**
2507  * isci_request_handle_controller_specific_errors() - This function decodes
2508  *    controller-specific I/O completion error conditions.
2509  * @idev: Remote device
2510  * @request: This parameter is the completed isci_request object.
2511  * @task: This parameter is the task struct from the upper layer driver.
2512  * @response_ptr: This parameter specifies the service response for the I/O.
2513  * @status_ptr: This parameter specifies the exec status for the I/O.
2514  *
2515  * Return: none.
2516  */
2517 static void isci_request_handle_controller_specific_errors(
2518     struct isci_remote_device *idev,
2519     struct isci_request *request,
2520     struct sas_task *task,
2521     enum service_response *response_ptr,
2522     enum exec_status *status_ptr)
2523 {
2524     unsigned int cstatus;
2525 
2526     cstatus = request->scu_status;
2527 
2528     dev_dbg(&request->isci_host->pdev->dev,
2529         "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2530         "- controller status = 0x%x\n",
2531         __func__, request, cstatus);
2532 
2533     /* Decode the controller-specific errors; the most
2534      * important thing is to recognize those conditions in which
2535      * the target may still have a task outstanding that
2536      * must be aborted.
2537      *
2538      * Note that there are SCU completion codes named in the
2539      * decode below for which SCIC has already done work to
2540      * handle them in a way other than as a controller-specific
2541      * completion code; these are left in the decode below for
2542      * completeness' sake.
2543      */
2544     switch (cstatus) {
2545     case SCU_TASK_DONE_DMASETUP_DIRERR:
2546     /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2547     case SCU_TASK_DONE_XFERCNT_ERR:
2548         /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2549         if (task->task_proto == SAS_PROTOCOL_SMP) {
2550             /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2551             *response_ptr = SAS_TASK_COMPLETE;
2552 
2553             /* See if the device has been/is being stopped. Note
2554              * that we ignore the quiesce state, since we are
2555              * concerned about the actual device state.
2556              */
2557             if (!idev)
2558                 *status_ptr = SAS_DEVICE_UNKNOWN;
2559             else
2560                 *status_ptr = SAS_ABORTED_TASK;
2561 
2562             set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2563         } else {
2564             /* Task in the target is not done. */
2565             *response_ptr = SAS_TASK_UNDELIVERED;
2566 
2567             if (!idev)
2568                 *status_ptr = SAS_DEVICE_UNKNOWN;
2569             else
2570                 *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
2571 
2572             clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2573         }
2574 
2575         break;
2576 
2577     case SCU_TASK_DONE_CRC_ERR:
2578     case SCU_TASK_DONE_NAK_CMD_ERR:
2579     case SCU_TASK_DONE_EXCESS_DATA:
2580     case SCU_TASK_DONE_UNEXP_FIS:
2581     /* Also SCU_TASK_DONE_UNEXP_RESP: */
2582     case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2583     case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2584     case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2585         /* These are conditions in which the target
2586          * has completed the task, so that no cleanup
2587          * is necessary.
2588          */
2589         *response_ptr = SAS_TASK_COMPLETE;
2590 
2591         /* See if the device has been/is being stopped. Note
2592          * that we ignore the quiesce state, since we are
2593          * concerned about the actual device state.
2594          */
2595         if (!idev)
2596             *status_ptr = SAS_DEVICE_UNKNOWN;
2597         else
2598             *status_ptr = SAS_ABORTED_TASK;
2599 
2600         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2601         break;
2602 
2603 
2604     /* Note that the only open reject completion codes seen here will be
2605      * abandon-class codes; all others are automatically retried in the SCU.
2606      */
2607     case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2608 
2609         isci_request_set_open_reject_status(
2610             request, task, response_ptr, status_ptr,
2611             SAS_OREJ_WRONG_DEST);
2612         break;
2613 
2614     case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2615 
2616         /* Note - the return of AB0 will change when
2617          * libsas implements detection of zone violations.
2618          */
2619         isci_request_set_open_reject_status(
2620             request, task, response_ptr, status_ptr,
2621             SAS_OREJ_RESV_AB0);
2622         break;
2623 
2624     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2625 
2626         isci_request_set_open_reject_status(
2627             request, task, response_ptr, status_ptr,
2628             SAS_OREJ_RESV_AB1);
2629         break;
2630 
2631     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2632 
2633         isci_request_set_open_reject_status(
2634             request, task, response_ptr, status_ptr,
2635             SAS_OREJ_RESV_AB2);
2636         break;
2637 
2638     case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2639 
2640         isci_request_set_open_reject_status(
2641             request, task, response_ptr, status_ptr,
2642             SAS_OREJ_RESV_AB3);
2643         break;
2644 
2645     case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2646 
2647         isci_request_set_open_reject_status(
2648             request, task, response_ptr, status_ptr,
2649             SAS_OREJ_BAD_DEST);
2650         break;
2651 
2652     case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2653 
2654         isci_request_set_open_reject_status(
2655             request, task, response_ptr, status_ptr,
2656             SAS_OREJ_STP_NORES);
2657         break;
2658 
2659     case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2660 
2661         isci_request_set_open_reject_status(
2662             request, task, response_ptr, status_ptr,
2663             SAS_OREJ_EPROTO);
2664         break;
2665 
2666     case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2667 
2668         isci_request_set_open_reject_status(
2669             request, task, response_ptr, status_ptr,
2670             SAS_OREJ_CONN_RATE);
2671         break;
2672 
2673     case SCU_TASK_DONE_LL_R_ERR:
2674     /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2675     case SCU_TASK_DONE_LL_PERR:
2676     case SCU_TASK_DONE_LL_SY_TERM:
2677     /* Also SCU_TASK_DONE_NAK_ERR:*/
2678     case SCU_TASK_DONE_LL_LF_TERM:
2679     /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2680     case SCU_TASK_DONE_LL_ABORT_ERR:
2681     case SCU_TASK_DONE_SEQ_INV_TYPE:
2682     /* Also SCU_TASK_DONE_UNEXP_XR: */
2683     case SCU_TASK_DONE_XR_IU_LEN_ERR:
2684     case SCU_TASK_DONE_INV_FIS_LEN:
2685     /* Also SCU_TASK_DONE_XR_WD_LEN: */
2686     case SCU_TASK_DONE_SDMA_ERR:
2687     case SCU_TASK_DONE_OFFSET_ERR:
2688     case SCU_TASK_DONE_MAX_PLD_ERR:
2689     case SCU_TASK_DONE_LF_ERR:
2690     case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2691     case SCU_TASK_DONE_SMP_LL_RX_ERR:
2692     case SCU_TASK_DONE_UNEXP_DATA:
2693     case SCU_TASK_DONE_UNEXP_SDBFIS:
2694     case SCU_TASK_DONE_REG_ERR:
2695     case SCU_TASK_DONE_SDB_ERR:
2696     case SCU_TASK_DONE_TASK_ABORT:
2697     default:
2698         /* Task in the target is not done. */
2699         *response_ptr = SAS_TASK_UNDELIVERED;
2700         *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
2701 
2702         if (task->task_proto == SAS_PROTOCOL_SMP)
2703             set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2704         else
2705             clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2706         break;
2707     }
2708 }
2709 
2710 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2711 {
2712     struct task_status_struct *ts = &task->task_status;
2713     struct ata_task_resp *resp = (void *)&ts->buf[0];
2714 
2715     resp->frame_len = sizeof(*fis);
2716     memcpy(resp->ending_fis, fis, sizeof(*fis));
2717     ts->buf_valid_size = sizeof(*resp);
2718 
2719     /* If an error is flagged let libata decode the fis */
2720     if (ac_err_mask(fis->status))
2721         ts->stat = SAS_PROTO_RESPONSE;
2722     else
2723         ts->stat = SAS_SAM_STAT_GOOD;
2724 
2725     ts->resp = SAS_TASK_COMPLETE;
2726 }
2727 
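/*
 * Editor's note: ac_err_mask() is the libata helper that returns a
 * nonzero error mask when the ATA status shows BSY/DRQ (handshake
 * error) or ERR (device error); only such FISes are handed to libata
 * for decode via SAS_PROTO_RESPONSE, while clean completions
 * short-circuit to SAS_SAM_STAT_GOOD.
 */
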
2728 static void isci_request_io_request_complete(struct isci_host *ihost,
2729                          struct isci_request *request,
2730                          enum sci_io_status completion_status)
2731 {
2732     struct sas_task *task = isci_request_access_task(request);
2733     struct ssp_response_iu *resp_iu;
2734     unsigned long task_flags;
2735     struct isci_remote_device *idev = request->target_device;
2736     enum service_response response = SAS_TASK_UNDELIVERED;
2737     enum exec_status status = SAS_ABORTED_TASK;
2738 
2739     dev_dbg(&ihost->pdev->dev,
2740         "%s: request = %p, task = %p, "
2741         "task->data_dir = %d completion_status = 0x%x\n",
2742         __func__, request, task, task->data_dir, completion_status);
2743 
2744     /* The request is done from an SCU HW perspective. */
2745 
2746     /* This is an active request being completed from the core. */
2747     switch (completion_status) {
2748 
2749     case SCI_IO_FAILURE_RESPONSE_VALID:
2750         dev_dbg(&ihost->pdev->dev,
2751             "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2752             __func__, request, task);
2753 
2754         if (sas_protocol_ata(task->task_proto)) {
2755             isci_process_stp_response(task, &request->stp.rsp);
2756         } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2757 
2758             /* crack the iu response buffer. */
2759             resp_iu = &request->ssp.rsp;
2760             isci_request_process_response_iu(task, resp_iu,
2761                              &ihost->pdev->dev);
2762 
2763         } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2764 
2765             dev_err(&ihost->pdev->dev,
2766                 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2767                     "SAS_PROTOCOL_SMP protocol\n",
2768                 __func__);
2769 
2770         } else
2771             dev_err(&ihost->pdev->dev,
2772                 "%s: unknown protocol\n", __func__);
2773 
2774         /* use the task status set in the task struct by the
2775          * isci_request_process_response_iu call.
2776          */
2777         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2778         response = task->task_status.resp;
2779         status = task->task_status.stat;
2780         break;
2781 
2782     case SCI_IO_SUCCESS:
2783     case SCI_IO_SUCCESS_IO_DONE_EARLY:
2784 
2785         response = SAS_TASK_COMPLETE;
2786         status   = SAS_SAM_STAT_GOOD;
2787         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2788 
2789         if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2790 
2791             /* This was an SSP / STP / SATA transfer.
2792              * There is a possibility that less data than
2793              * the maximum was transferred.
2794              */
2795             u32 transferred_length = sci_req_tx_bytes(request);
2796 
2797             task->task_status.residual
2798                 = task->total_xfer_len - transferred_length;
2799 
2800             /* If there were residual bytes, call this an
2801              * underrun.
2802              */
2803             if (task->task_status.residual != 0)
2804                 status = SAS_DATA_UNDERRUN;
2805 
2806             dev_dbg(&ihost->pdev->dev,
2807                 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2808                 __func__, status);
2809 
2810         } else
2811             dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
2812                 __func__);
2813         break;
2814 
2815     case SCI_IO_FAILURE_TERMINATED:
2816 
2817         dev_dbg(&ihost->pdev->dev,
2818             "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2819             __func__, request, task);
2820 
2821         /* The request was terminated explicitly. */
2822         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2823         response = SAS_TASK_UNDELIVERED;
2824 
2825         /* See if the device has been/is being stopped. Note
2826          * that we ignore the quiesce state, since we are
2827          * concerned about the actual device state.
2828          */
2829         if (!idev)
2830             status = SAS_DEVICE_UNKNOWN;
2831         else
2832             status = SAS_ABORTED_TASK;
2833         break;
2834 
2835     case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2836 
2837         isci_request_handle_controller_specific_errors(idev, request,
2838                                    task, &response,
2839                                    &status);
2840         break;
2841 
2842     case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2843         /* This is a special case, in that the I/O completion
2844          * is telling us that the device needs a reset.
2845          * In order for the device reset condition to be
2846          * noticed, the I/O has to be handled in the error
2847          * handler.  Set the reset flag and cause the
2848          * SCSI error thread to be scheduled.
2849          */
2850         spin_lock_irqsave(&task->task_state_lock, task_flags);
2851         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2852         spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2853 
2854         /* Fail the I/O. */
2855         response = SAS_TASK_UNDELIVERED;
2856         status = SAS_SAM_STAT_TASK_ABORTED;
2857 
2858         clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2859         break;
2860 
2861     case SCI_FAILURE_RETRY_REQUIRED:
2862 
2863         /* Fail the I/O so it can be retried. */
2864         response = SAS_TASK_UNDELIVERED;
2865         if (!idev)
2866             status = SAS_DEVICE_UNKNOWN;
2867         else
2868             status = SAS_ABORTED_TASK;
2869 
2870         set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2871         break;
2872 
2873 
2874     default:
2875         /* Catch any otherwise unhandled error codes here. */
2876         dev_dbg(&ihost->pdev->dev,
2877             "%s: invalid completion code: 0x%x - "
2878                 "isci_request = %p\n",
2879             __func__, completion_status, request);
2880 
2881         response = SAS_TASK_UNDELIVERED;
2882 
2883         /* See if the device has been/is being stopped. Note
2884          * that we ignore the quiesce state, since we are
2885          * concerned about the actual device state.
2886          */
2887         if (!idev)
2888             status = SAS_DEVICE_UNKNOWN;
2889         else
2890             status = SAS_ABORTED_TASK;
2891 
2892         if (task->task_proto == SAS_PROTOCOL_SMP)
2893             set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2894         else
2895             clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2896         break;
2897     }
2898 
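    /* Undo the DMA mappings that were set up at submission time (see
     * isci_io_request_build() and sci_io_request_construct_smp() below).
     */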
2899     switch (task->task_proto) {
2900     case SAS_PROTOCOL_SSP:
2901         if (task->data_dir == DMA_NONE)
2902             break;
2903         if (task->num_scatter == 0)
2904             /* 0 indicates a single dma address */
2905             dma_unmap_single(&ihost->pdev->dev,
2906                      request->zero_scatter_daddr,
2907                      task->total_xfer_len, task->data_dir);
2908         else  /* unmap the sgl dma addresses */
2909             dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2910                      request->num_sg_entries, task->data_dir);
2911         break;
2912     case SAS_PROTOCOL_SMP: {
2913         struct scatterlist *sg = &task->smp_task.smp_req;
2914         struct smp_req *smp_req;
2915         void *kaddr;
2916 
2917         dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2918 
2919         /* need to swab it back in case the command buffer is re-used */
2920         kaddr = kmap_atomic(sg_page(sg));
2921         smp_req = kaddr + sg->offset;
2922         sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2923         kunmap_atomic(kaddr);
2924         break;
2925     }
2926     default:
2927         break;
2928     }
2929 
2930     spin_lock_irqsave(&task->task_state_lock, task_flags);
2931 
2932     task->task_status.resp = response;
2933     task->task_status.stat = status;
2934 
2935     if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
2936         /* Normal notification (task_done) */
2937         task->task_state_flags |= SAS_TASK_STATE_DONE;
2938         task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2939     }
2940     spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2941 
2942     /* complete the io request to the core. */
2943     sci_controller_complete_io(ihost, request->target_device, request);
2944 
2945     /* Set the terminated flag so the request cannot be completed or
2946      * terminated again, and so that any calls into abort task
2947      * recognize the already-completed case.
2948      */
2949     set_bit(IREQ_TERMINATED, &request->flags);
2950 
2951     ireq_done(ihost, request, task);
2952 }
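
/* For reference, the libsas completion contract used above: task_status.resp
 * indicates whether the command reached the target (SAS_TASK_COMPLETE) or not
 * (SAS_TASK_UNDELIVERED), while task_status.stat carries either a SAM status
 * or a libsas disposition such as SAS_ABORTED_TASK or SAS_DATA_UNDERRUN.
 */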
2953 
2954 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2955 {
2956     struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2957     struct domain_device *dev = ireq->target_device->domain_dev;
2958     enum sci_base_request_states state;
2959     struct sas_task *task;
2960 
2961     /* XXX as hch said, always creating an internal sas_task for TMF
2962      * requests would simplify the driver.
2963      */
2964     task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
2965 
2966     /* All unaccelerated request types (i.e. not SSP and not NCQ) are
2967      * handled with substates.
2968      */
2969     if (!task && dev->dev_type == SAS_END_DEVICE) {
2970         state = SCI_REQ_TASK_WAIT_TC_COMP;
2971     } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2972         state = SCI_REQ_SMP_WAIT_RESP;
2973     } else if (task && sas_protocol_ata(task->task_proto) &&
2974            !task->ata_task.use_ncq) {
2975         if (dev->sata_dev.class == ATA_DEV_ATAPI &&
2976             task->ata_task.fis.command == ATA_CMD_PACKET) {
2977             state = SCI_REQ_ATAPI_WAIT_H2D;
2978         } else if (task->data_dir == DMA_NONE) {
2979             state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2980         } else if (task->ata_task.dma_xfer) {
2981             state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2982         } else /* PIO */ {
2983             state = SCI_REQ_STP_PIO_WAIT_H2D;
2984         }
2985     } else {
2986         /* SSP or NCQ are fully accelerated, no substates */
2987         return;
2988     }
2989     sci_change_state(sm, state);
2990 }
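
/* Substate routing performed above, for reference:
 *   no sas_task (TMF) to an end device -> SCI_REQ_TASK_WAIT_TC_COMP
 *   SMP request                        -> SCI_REQ_SMP_WAIT_RESP
 *   ATAPI packet command               -> SCI_REQ_ATAPI_WAIT_H2D
 *   non-data STP                       -> SCI_REQ_STP_NON_DATA_WAIT_H2D
 *   UDMA STP                           -> SCI_REQ_STP_UDMA_WAIT_TC_COMP
 *   PIO STP                            -> SCI_REQ_STP_PIO_WAIT_H2D
 * SSP and NCQ are fully accelerated and take no substate transition.
 */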
2991 
2992 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
2993 {
2994     struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2995     struct isci_host *ihost = ireq->owning_controller;
2996 
2997     /* Tell the SCI_USER that the IO request is complete */
2998     if (!test_bit(IREQ_TMF, &ireq->flags))
2999         isci_request_io_request_complete(ihost, ireq,
3000                          ireq->sci_status);
3001     else
3002         isci_task_request_complete(ihost, ireq, ireq->sci_status);
3003 }
3004 
3005 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3006 {
3007     struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3008 
3009     /* Setting the abort bit in the Task Context is required by the silicon. */
3010     ireq->tc->abort = 1;
3011 }
3012 
3013 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3014 {
3015     struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3016 
3017     ireq->target_device->working_request = ireq;
3018 }
3019 
3020 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3021 {
3022     struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3023 
3024     ireq->target_device->working_request = ireq;
3025 }
3026 
3027 static const struct sci_base_state sci_request_state_table[] = {
3028     [SCI_REQ_INIT] = { },
3029     [SCI_REQ_CONSTRUCTED] = { },
3030     [SCI_REQ_STARTED] = {
3031         .enter_state = sci_request_started_state_enter,
3032     },
3033     [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3034         .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3035     },
3036     [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3037     [SCI_REQ_STP_PIO_WAIT_H2D] = {
3038         .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3039     },
3040     [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3041     [SCI_REQ_STP_PIO_DATA_IN] = { },
3042     [SCI_REQ_STP_PIO_DATA_OUT] = { },
3043     [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3044     [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3045     [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3046     [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3047     [SCI_REQ_SMP_WAIT_RESP] = { },
3048     [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3049     [SCI_REQ_ATAPI_WAIT_H2D] = { },
3050     [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3051     [SCI_REQ_ATAPI_WAIT_D2H] = { },
3052     [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3053     [SCI_REQ_COMPLETED] = {
3054         .enter_state = sci_request_completed_state_enter,
3055     },
3056     [SCI_REQ_ABORTING] = {
3057         .enter_state = sci_request_aborting_state_enter,
3058     },
3059     [SCI_REQ_FINAL] = { },
3060 };
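
/* Note: sci_change_state() (part of this driver's state machine support)
 * invokes the new state's .enter_state callback, if any; entries left as
 * { } above simply take no action on entry.
 */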
3061 
3062 static void
3063 sci_general_request_construct(struct isci_host *ihost,
3064                    struct isci_remote_device *idev,
3065                    struct isci_request *ireq)
3066 {
3067     sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3068 
3069     ireq->target_device = idev;
3070     ireq->protocol = SAS_PROTOCOL_NONE;
3071     ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3072 
3073     ireq->sci_status   = SCI_SUCCESS;
3074     ireq->scu_status   = 0;
3075     ireq->post_context = 0xFFFFFFFF;
3076 }
3077 
3078 static enum sci_status
3079 sci_io_request_construct(struct isci_host *ihost,
3080               struct isci_remote_device *idev,
3081               struct isci_request *ireq)
3082 {
3083     struct domain_device *dev = idev->domain_dev;
3084     enum sci_status status = SCI_SUCCESS;
3085 
3086     /* Build the common part of the request */
3087     sci_general_request_construct(ihost, idev, ireq);
3088 
3089     if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3090         return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3091 
3092     if (dev->dev_type == SAS_END_DEVICE)
3093         /* pass */;
3094     else if (dev_is_sata(dev))
3095         memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3096     else if (dev_is_expander(dev->dev_type))
3097         /* pass */;
3098     else
3099         return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3100 
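    /* Clear only the leading portion of the task context here; the
     * embedded SGL pair area (sgl_pair_ab onward) is rewritten later by
     * the protocol-specific construction paths.
     */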
3101     memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3102 
3103     return status;
3104 }
3105 
3106 enum sci_status sci_task_request_construct(struct isci_host *ihost,
3107                         struct isci_remote_device *idev,
3108                         u16 io_tag, struct isci_request *ireq)
3109 {
3110     struct domain_device *dev = idev->domain_dev;
3111     enum sci_status status = SCI_SUCCESS;
3112 
3113     /* Build the common part of the request */
3114     sci_general_request_construct(ihost, idev, ireq);
3115 
3116     if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
3117         set_bit(IREQ_TMF, &ireq->flags);
3118         memset(ireq->tc, 0, sizeof(struct scu_task_context));
3119 
3120         /* Set the protocol indicator. */
3121         if (dev_is_sata(dev))
3122             ireq->protocol = SAS_PROTOCOL_STP;
3123         else
3124             ireq->protocol = SAS_PROTOCOL_SSP;
3125     } else
3126         status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3127 
3128     return status;
3129 }
3130 
3131 static enum sci_status isci_request_ssp_request_construct(
3132     struct isci_request *request)
3133 {
3134     enum sci_status status;
3135 
3136     dev_dbg(&request->isci_host->pdev->dev,
3137         "%s: request = %p\n",
3138         __func__,
3139         request);
3140     status = sci_io_request_construct_basic_ssp(request);
3141     return status;
3142 }
3143 
3144 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3145 {
3146     struct sas_task *task = isci_request_access_task(ireq);
3147     struct host_to_dev_fis *fis = &ireq->stp.cmd;
3148     struct ata_queued_cmd *qc = task->uldd_task;
3149     enum sci_status status;
3150 
3151     dev_dbg(&ireq->isci_host->pdev->dev,
3152         "%s: ireq = %p\n",
3153         __func__,
3154         ireq);
3155 
3156     memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
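    /* Bit 7 of the FIS flags byte is the C bit (command register update):
     * set it unless this is a device-control FIS, then mask off all but
     * the upper nibble so the PM port field (bits 3:0) is cleared.
     */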
3157     if (!task->ata_task.device_control_reg_update)
3158         fis->flags |= 0x80;
3159     fis->flags &= 0xF0;
3160 
3161     status = sci_io_request_construct_basic_sata(ireq);
3162 
3163     if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3164            qc->tf.command == ATA_CMD_FPDMA_READ ||
3165            qc->tf.command == ATA_CMD_FPDMA_RECV ||
3166            qc->tf.command == ATA_CMD_FPDMA_SEND ||
3167            qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
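        /* Per the SATA spec, the NCQ tag is carried in bits 7:3 of the
         * FIS sector count field, hence the shift by 3 below.
         */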
3168         fis->sector_count = qc->tag << 3;
3169         ireq->tc->type.stp.ncq_tag = qc->tag;
3170     }
3171 
3172     return status;
3173 }
3174 
3175 static enum sci_status
3176 sci_io_request_construct_smp(struct device *dev,
3177                   struct isci_request *ireq,
3178                   struct sas_task *task)
3179 {
3180     struct scatterlist *sg = &task->smp_task.smp_req;
3181     struct isci_remote_device *idev;
3182     struct scu_task_context *task_context;
3183     struct isci_port *iport;
3184     struct smp_req *smp_req;
3185     void *kaddr;
3186     u8 req_len;
3187     u32 cmd;
3188 
3189     kaddr = kmap_atomic(sg_page(sg));
3190     smp_req = kaddr + sg->offset;
3191     /*
3192      * Look at the SMP request's header fields; for certain SAS 1.x SMP
3193      * functions under SAS 2.0, a zero request length really indicates
3194      * a non-zero default length.
3195      */
3196     if (smp_req->req_len == 0) {
3197         switch (smp_req->func) {
3198         case SMP_DISCOVER:
3199         case SMP_REPORT_PHY_ERR_LOG:
3200         case SMP_REPORT_PHY_SATA:
3201         case SMP_REPORT_ROUTE_INFO:
3202             smp_req->req_len = 2;
3203             break;
3204         case SMP_CONF_ROUTE_INFO:
3205         case SMP_PHY_CONTROL:
3206         case SMP_PHY_TEST_FUNCTION:
3207             smp_req->req_len = 9;
3208             break;
3209         /* Default: zero is a valid default length for SAS 2.0. */
3210         }
3211     }
3212     req_len = smp_req->req_len;
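    /* The SCU consumes the request as byte-swapped dwords; swap in place
     * (the completion path swaps them back) and capture the swapped
     * first dword for the task context below.
     */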
3213     sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3214     cmd = *(u32 *) smp_req;
3215     kunmap_atomic(kaddr);
3216 
3217     if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3218         return SCI_FAILURE;
3219 
3220     ireq->protocol = SAS_PROTOCOL_SMP;
3221 
3222     /* Now fill in the task context for the (already byte-swapped) request. */
3223 
3224     task_context = ireq->tc;
3225 
3226     idev = ireq->target_device;
3227     iport = idev->owning_port;
3228 
3229     /* Fill in the TC with its required data.  The hex labels below
3230      * are byte offsets within the task context.
3231      * 00h
3232      */
3233     task_context->priority = 0;
3234     task_context->initiator_request = 1;
3235     task_context->connection_rate = idev->connection_rate;
3236     task_context->protocol_engine_index = ISCI_PEG;
3237     task_context->logical_port_index = iport->physical_port_index;
3238     task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3239     task_context->abort = 0;
3240     task_context->valid = SCU_TASK_CONTEXT_VALID;
3241     task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3242 
3243     /* 04h */
3244     task_context->remote_node_index = idev->rnc.remote_node_index;
3245     task_context->command_code = 0;
3246     task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3247 
3248     /* 08h */
3249     task_context->link_layer_control = 0;
3250     task_context->do_not_dma_ssp_good_response = 1;
3251     task_context->strict_ordering = 0;
3252     task_context->control_frame = 1;
3253     task_context->timeout_enable = 0;
3254     task_context->block_guard_enable = 0;
3255 
3256     /* 0ch */
3257     task_context->address_modifier = 0;
3258 
3259     /* 10h */
3260     task_context->ssp_command_iu_length = req_len;
3261 
3262     /* 14h */
3263     task_context->transfer_length_bytes = 0;
3264 
3265     /*
3266      * 18h ~ 30h, protocol specific
3267      * since the command IU has already been built by the framework at
3268      * this point, we just copy its first dword to this location. */
3269     memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3270 
3271     /*
3272      * 40h
3273      * "For SMP you could program it to zero. We would prefer that way
3274      * so that done code will be consistent." - Venki
3275      */
3276     task_context->task_phase = 0;
3277 
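    /* The post context routes this TC to the hardware: protocol engine
     * group, logical port index, and the task context index decoded
     * from the I/O tag.
     */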
3278     ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3279                   (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3280                    (iport->physical_port_index <<
3281                 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3282                   ISCI_TAG_TCI(ireq->io_tag));
3283     /*
3284      * Copy the physical address of the command buffer into the SCU Task
3285      * Context; the buffer handed to the SCU must not include the command
3286      * header, hence the sizeof(u32) offset on the lower address. */
3287     task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3288     task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3289 
3290     /* SMP response arrives as an unsolicited frame (UF); no response IU address needed. */
3291     task_context->response_iu_upper = 0;
3292     task_context->response_iu_lower = 0;
3293 
3294     sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3295 
3296     return SCI_SUCCESS;
3297 }
3298 
3299 /**
3300  * isci_smp_request_build() - This function builds the smp request.
3301  * @ireq: This parameter points to the isci_request allocated in the
3302  *    request construct function.
3303  *
3304  * Return: SCI_SUCCESS on successful completion, or a specific failure code.
3305  */
3306 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3307 {
3308     struct sas_task *task = isci_request_access_task(ireq);
3309     struct device *dev = &ireq->isci_host->pdev->dev;
3310     enum sci_status status = SCI_FAILURE;
3311 
3312     status = sci_io_request_construct_smp(dev, ireq, task);
3313     if (status != SCI_SUCCESS)
3314         dev_dbg(&ireq->isci_host->pdev->dev,
3315              "%s: failed with status = %d\n",
3316              __func__,
3317              status);
3318 
3319     return status;
3320 }
3321 
3322 /**
3323  * isci_io_request_build() - This function builds the io request object.
3324  * @ihost: This parameter specifies the ISCI host object
3325  * @request: This parameter points to the isci_request object allocated in the
3326  *    request construct function.
3327  * @idev: This parameter is the handle for the sci core's remote device
3328  *    object that is the destination for this request.
3329  *
3330  * Return: SCI_SUCCESS on successful completion, or a specific failure code.
3331  */
3332 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3333                          struct isci_request *request,
3334                          struct isci_remote_device *idev)
3335 {
3336     enum sci_status status = SCI_SUCCESS;
3337     struct sas_task *task = isci_request_access_task(request);
3338 
3339     dev_dbg(&ihost->pdev->dev,
3340         "%s: idev = %p; request = %p, "
3341         "num_scatter = %d\n",
3342         __func__,
3343         idev,
3344         request,
3345         task->num_scatter);
3346 
3347     /* map the sgl addresses, if present.
3348      * libata does the mapping for sata devices
3349      * before we get the request.
3350      */
3351     if (task->num_scatter &&
3352         !sas_protocol_ata(task->task_proto) &&
3353         !(SAS_PROTOCOL_SMP & task->task_proto)) {
3354 
3355         request->num_sg_entries = dma_map_sg(
3356             &ihost->pdev->dev,
3357             task->scatter,
3358             task->num_scatter,
3359             task->data_dir
3360             );
3361 
3362         if (request->num_sg_entries == 0)
3363             return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3364     }
3365 
3366     status = sci_io_request_construct(ihost, idev, request);
3367 
3368     if (status != SCI_SUCCESS) {
3369         dev_dbg(&ihost->pdev->dev,
3370              "%s: failed request construct\n",
3371              __func__);
3372         return SCI_FAILURE;
3373     }
3374 
3375     switch (task->task_proto) {
3376     case SAS_PROTOCOL_SMP:
3377         status = isci_smp_request_build(request);
3378         break;
3379     case SAS_PROTOCOL_SSP:
3380         status = isci_request_ssp_request_construct(request);
3381         break;
3382     case SAS_PROTOCOL_SATA:
3383     case SAS_PROTOCOL_STP:
3384     case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3385         status = isci_request_stp_request_construct(request);
3386         break;
3387     default:
3388         dev_dbg(&ihost->pdev->dev,
3389              "%s: unknown protocol\n", __func__);
3390         return SCI_FAILURE;
3391     }
3392 
3393     return status;
3394 }
3395 
3396 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3397 {
3398     struct isci_request *ireq;
3399 
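    /* The I/O tag encodes a sequence number plus a task context index
     * (TCI); ISCI_TAG_TCI() recovers the TCI, which indexes the
     * preallocated request array.
     */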
3400     ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3401     ireq->io_tag = tag;
3402     ireq->io_request_completion = NULL;
3403     ireq->flags = 0;
3404     ireq->num_sg_entries = 0;
3405 
3406     return ireq;
3407 }
3408 
3409 struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3410                           struct sas_task *task,
3411                           u16 tag)
3412 {
3413     struct isci_request *ireq;
3414 
3415     ireq = isci_request_from_tag(ihost, tag);
3416     ireq->ttype_ptr.io_task_ptr = task;
3417     clear_bit(IREQ_TMF, &ireq->flags);
3418     task->lldd_task = ireq;
3419 
3420     return ireq;
3421 }
3422 
3423 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3424                            struct isci_tmf *isci_tmf,
3425                            u16 tag)
3426 {
3427     struct isci_request *ireq;
3428 
3429     ireq = isci_request_from_tag(ihost, tag);
3430     ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3431     set_bit(IREQ_TMF, &ireq->flags);
3432 
3433     return ireq;
3434 }
3435 
3436 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3437              struct sas_task *task, struct isci_request *ireq)
3438 {
3439     enum sci_status status;
3440     unsigned long flags;
3441     int ret = 0;
3442 
3443     status = isci_io_request_build(ihost, ireq, idev);
3444     if (status != SCI_SUCCESS) {
3445         dev_dbg(&ihost->pdev->dev,
3446              "%s: request_construct failed - status = 0x%x\n",
3447              __func__,
3448              status);
3449         return status;
3450     }
3451 
3452     spin_lock_irqsave(&ihost->scic_lock, flags);
3453 
3454     if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3455 
3456         if (isci_task_is_ncq_recovery(task)) {
3457 
3458             /* The device is in an NCQ recovery state.  Issue the
3459              * request on the task side.  Note that it will
3460              * complete on the I/O request side because the
3461              * request was built that way (i.e. the
3462              * IREQ_TMF flag is not set).
3463              */
3464             status = sci_controller_start_task(ihost,
3465                                 idev,
3466                                 ireq);
3467         } else {
3468             status = SCI_FAILURE;
3469         }
3470     } else {
3471         /* Send the request; the I/O tag was assigned at allocation time. */
3472         status = sci_controller_start_io(ihost, idev,
3473                           ireq);
3474     }
3475 
3476     if (status != SCI_SUCCESS &&
3477         status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3478         dev_dbg(&ihost->pdev->dev,
3479              "%s: failed request start (0x%x)\n",
3480              __func__, status);
3481         spin_unlock_irqrestore(&ihost->scic_lock, flags);
3482         return status;
3483     }
3484     /* Either I/O started OK, or the core has signaled that
3485      * the device needs a target reset.
3486      */
3487     if (status != SCI_SUCCESS) {
3488         /* The request did not really start in the
3489          * hardware, so clear the request handle
3490          * here so no terminations will be done.
3491          */
3492         set_bit(IREQ_TERMINATED, &ireq->flags);
3493     }
3494     spin_unlock_irqrestore(&ihost->scic_lock, flags);
3495 
3496     if (status ==
3497         SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3498         /* Signal libsas that we need the SCSI error
3499          * handler thread to work on this I/O and that
3500          * we want a device reset.
3501          */
3502         spin_lock_irqsave(&task->task_state_lock, flags);
3503         task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3504         spin_unlock_irqrestore(&task->task_state_lock, flags);
3505 
3506         /* Cause this task to be scheduled in the SCSI error
3507          * handler thread.
3508          */
3509         sas_task_abort(task);
3510 
3511         /* Change the status, since we are holding
3512          * the I/O until it is managed by the SCSI
3513          * error handler.
3514          */
3515         status = SCI_SUCCESS;
3516     }
3517 
3518     return ret;
3519 }
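
/* Illustrative submission flow (a sketch, not driver code): this mirrors
 * how the task layer is expected to drive the helpers above -- reserve a
 * tag under scic_lock, bind the sas_task to a request object, then hand
 * it to isci_request_execute().  isci_alloc_tag()/isci_free_tag() and
 * SCI_CONTROLLER_INVALID_IO_TAG are assumed to be the tag-pool helpers
 * declared elsewhere in this driver.
 *
 *	static int example_submit(struct isci_host *ihost,
 *				  struct isci_remote_device *idev,
 *				  struct sas_task *task)
 *	{
 *		struct isci_request *ireq;
 *		unsigned long flags;
 *		u16 tag;
 *		int ret;
 *
 *		spin_lock_irqsave(&ihost->scic_lock, flags);
 *		tag = isci_alloc_tag(ihost);
 *		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 *		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *			return -ENOMEM;
 *
 *		ireq = isci_io_request_from_tag(ihost, task, tag);
 *		ret = isci_request_execute(ihost, idev, task, ireq);
 *		if (ret) {
 *			// on failure, return the tag to the pool
 *			spin_lock_irqsave(&ihost->scic_lock, flags);
 *			isci_free_tag(ihost, tag);
 *			spin_unlock_irqrestore(&ihost->scic_lock, flags);
 *		}
 *		return ret;
 *	}
 */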