// SPDX-License-Identifier: GPL-2.0
/*
 * scsi_error.c - SCSI midlayer error, timeout and recovery handling.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/sg.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#include "scsi_transport_api.h"

#include <trace/events/scsi.h>

#include <asm/unaligned.h>

#define BUS_RESET_SETTLE_TIME	(10)	/* seconds; see ssleep() users below */
#define HOST_RESET_SETTLE_TIME	(10)	/* seconds */

static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *,
						   struct scsi_cmnd *);

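/**
 * scsi_eh_wakeup - wake up the error handler thread
 * @shost:	host on which a command has failed
 *
 * Wake the error handler once every failed command on @shost is accounted
 * for, i.e. the number of busy commands equals the number of failed ones.
 * Must be called with the host lock held.
 */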
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	lockdep_assert_held(shost->host_lock);

	if (scsi_host_busy(shost) == shost->host_failed) {
		trace_scsi_eh_wakeup(shost);
		wake_up_process(shost->ehandler);
		SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
			"Waking error handler thread\n"));
	}
}

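/**
 * scsi_schedule_eh - schedule EH for SCSI host
 * @shost:	SCSI host to invoke error handling on.
 *
 * Schedule SCSI EH without a command, e.g. on behalf of a transport class.
 */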
void scsi_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
		shost->host_eh_scheduled++;
		scsi_eh_wakeup(shost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);

static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
	if (!shost->last_reset || shost->eh_deadline == -1)
		return 0;

	/*
	 * 32bit accesses are guaranteed to be atomic
	 * (on all supported architectures), so instead
	 * of using a spinlock we can as well double check
	 * if eh_deadline has been set to 'off' during the
	 * setting of last_reset.
	 */
	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
	    shost->eh_deadline > -1)
		return 0;

	return 1;
}

static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd)
{
	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return true;

	return ++cmd->retries <= cmd->allowed;
}

static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *host = sdev->host;

	if (host->hostt->eh_should_retry_cmd)
		return host->hostt->eh_should_retry_cmd(cmd);

	return true;
}

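/**
 * scmd_eh_abort_handler - Handle command aborts
 * @work:	command to be aborted.
 *
 * Note: this function must be called only for a command that has timed out.
 * Because the block layer marks a request as complete before it calls
 * scsi_timeout(), a .scsi_done() call from the LLD for a command that has
 * timed out does not have any effect. Hence it is safe to call
 * scsi_finish_command() from this function.
 */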
void
scmd_eh_abort_handler(struct work_struct *work)
{
	struct scsi_cmnd *scmd =
		container_of(work, struct scsi_cmnd, abort_work.work);
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	enum scsi_disposition rtn;
	unsigned long flags;

	if (scsi_host_eh_past_deadline(shost)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "eh timeout, not aborting\n"));
		goto out;
	}

	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_INFO, scmd,
			    "aborting command\n"));
	rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
	if (rtn != SUCCESS) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "cmd abort %s\n",
				    (rtn == FAST_IO_FAIL) ?
				    "not sent" : "failed"));
		goto out;
	}
	set_host_byte(scmd, DID_TIME_OUT);
	if (scsi_host_eh_past_deadline(shost)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "eh timeout, not retrying "
				    "aborted command\n"));
		goto out;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	list_del_init(&scmd->eh_entry);

	/*
	 * If the abort succeeds, and there is no further
	 * EH action, clear the ->last_reset time.
	 */
	if (list_empty(&shost->eh_abort_list) &&
	    list_empty(&shost->eh_cmd_q))
		if (shost->eh_deadline != -1)
			shost->last_reset = 0;

	spin_unlock_irqrestore(shost->host_lock, flags);

	if (!scsi_noretry_cmd(scmd) &&
	    scsi_cmd_retry_allowed(scmd) &&
	    scsi_eh_should_retry_cmd(scmd)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_WARNING, scmd,
				    "retry aborted command\n"));
		scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
	} else {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_WARNING, scmd,
				    "finish aborted command\n"));
		scsi_finish_command(scmd);
	}
	return;

out:
	spin_lock_irqsave(shost->host_lock, flags);
	list_del_init(&scmd->eh_entry);
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_eh_scmd_add(scmd);
}

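/**
 * scsi_abort_command - schedule a command abort
 * @scmd:	scmd to abort.
 *
 * We only need to abort commands after a command timeout.
 */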
static int
scsi_abort_command(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
		/*
		 * Retry after abort failed, escalate to next level.
		 */
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "previous abort failed\n"));
		BUG_ON(delayed_work_pending(&scmd->abort_work));
		return FAILED;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;
	BUG_ON(!list_empty(&scmd->eh_entry));
	list_add_tail(&scmd->eh_entry, &shost->eh_abort_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
	queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
	return SUCCESS;
}

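/**
 * scsi_eh_reset - call into ->eh_reset to reset internal state
 * @scmd:	command that an EH action has been scheduled for
 *
 * For filesystem requests, give the upper-level driver a chance to reset
 * any per-command state before error handling starts.
 */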
static void scsi_eh_reset(struct scsi_cmnd *scmd)
{
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);

		if (sdrv->eh_reset)
			sdrv->eh_reset(scmd);
	}
}

static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
	struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_failed++;
	scsi_eh_wakeup(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

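/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 */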
void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(!shost->ehandler);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
		ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
		WARN_ON_ONCE(ret);
	}
	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;

	scsi_eh_reset(scmd);
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Ensure that all tasks observe the host state change before the
	 * host_failed change.
	 */
	call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
}

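/**
 * scsi_timeout - Timeout function for normal scsi commands.
 * @req:	request that is timing out.
 *
 * Notes:
 *     We do not need to lock this.  There is the potential for a race
 *     only in that the normal completion handling might run, but if the
 *     normal completion function determines that the timer has already
 *     fired, then it mustn't do anything.
 */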
enum blk_eh_timer_return scsi_timeout(struct request *req)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
	enum blk_eh_timer_return rtn = BLK_EH_DONE;
	struct Scsi_Host *host = scmd->device->host;

	trace_scsi_dispatch_cmd_timeout(scmd);
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	if (host->eh_deadline != -1 && !host->last_reset)
		host->last_reset = jiffies;

	if (host->hostt->eh_timed_out)
		rtn = host->hostt->eh_timed_out(scmd);

	if (rtn == BLK_EH_DONE) {
		/*
		 * Set the command to complete first in order to prevent a real
		 * completion from releasing the command while error handling
		 * is using it. If the command was already completed, then the
		 * lower level driver beat the timeout handler, and it is safe
		 * to return without escalating error recovery.
		 *
		 * If timeout handling lost the race to a real completion, the
		 * block layer may ignore that due to a fake timeout injection,
		 * so return RESET_TIMER to allow error handling another shot
		 * at this command.
		 */
		if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
			return BLK_EH_RESET_TIMER;
		if (scsi_abort_command(scmd) != SUCCESS) {
			set_host_byte(scmd, DID_TIME_OUT);
			scsi_eh_scmd_add(scmd);
		}
	}

	return rtn;
}

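/**
 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 * @sdev:	Device on which we are performing recovery.
 *
 * Description:
 *     We block until the host is out of error recovery, and then check to
 *     see whether the host or the device is offline.
 *
 * Return value:
 *     0 when dev was taken offline by error recovery. 1 OK to proceed.
 */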
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
	int online;

	wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));

	online = scsi_device_online(sdev);

	return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);

#ifdef CONFIG_SCSI_LOGGING
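/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	Queue of scsi cmds to process.
 */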
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total_failures = 0;
	int cmd_failed = 0;
	int cmd_cancel = 0;
	int devices_failed = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device == sdev) {
				++total_failures;
				if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
					++cmd_cancel;
				else
					++cmd_failed;
			}
		}

		if (cmd_cancel || cmd_failed) {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __func__, cmd_failed,
					    cmd_cancel));
			cmd_cancel = 0;
			cmd_failed = 0;
			++devices_failed;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
				   "Total of %d commands on %d"
				   " devices require eh work\n",
				   total_failures, devices_failed));
}
#endif

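/**
 * scsi_report_lun_change - Set flag on all *other* devices on the same target
 *                          to indicate that a UNIT ATTENTION is expected.
 * @sdev:	Device reporting the UNIT ATTENTION
 */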
static void scsi_report_lun_change(struct scsi_device *sdev)
{
	sdev->sdev_target->expecting_lun_change = 1;
}

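/**
 * scsi_report_sense - Examine scsi sense information and log messages for
 *		       certain conditions, also issue uevents for some of them.
 * @sdev:	Device reporting the sense code
 * @sshdr:	sshdr to be examined
 */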
static void scsi_report_sense(struct scsi_device *sdev,
			      struct scsi_sense_hdr *sshdr)
{
	enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;	/* i.e. none */

	if (sshdr->sense_key == UNIT_ATTENTION) {
		if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
			evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Inquiry data has changed");
		} else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
			evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
			scsi_report_lun_change(sdev);
			sdev_printk(KERN_WARNING, sdev,
				    "LUN assignments on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically remap LUN assignments.\n");
		} else if (sshdr->asc == 0x3f)
			sdev_printk(KERN_WARNING, sdev,
				    "Operating parameters on this target have "
				    "changed. The Linux SCSI layer does not "
				    "automatically adjust these parameters.\n");

		if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
			evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Warning! Received an indication that the "
				    "LUN reached a thin provisioning soft "
				    "threshold.\n");
		}

		if (sshdr->asc == 0x29) {
			evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
			/*
			 * Do not print message if it is an expected side-effect
			 * of runtime PM.
			 */
			if (!sdev->silence_suspend)
				sdev_printk(KERN_WARNING, sdev,
					    "Power-on or device reset occurred\n");
		}

		if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
			evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Mode parameters changed");
		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
			evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Asymmetric access state changed");
		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
			evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
			sdev_printk(KERN_WARNING, sdev,
				    "Capacity data has changed");
		} else if (sshdr->asc == 0x2a)
			sdev_printk(KERN_WARNING, sdev,
				    "Parameters changed");
	}

	if (evt_type != SDEV_EVT_MAXBITS) {
		set_bit(evt_type, sdev->pending_events);
		schedule_work(&sdev->event_work);
	}
}

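/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd:	Cmd to have sense checked.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 */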
enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	if (!scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	scsi_report_sense(sdev, &sshdr);

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	if (sdev->handler && sdev->handler->check_sense) {
		enum scsi_disposition rc;

		rc = sdev->handler->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler does not care. Drop down to default handling */
	}

	if (scmd->cmnd[0] == TEST_UNIT_READY &&
	    scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER)
		/*
		 * nasty: for mid-layer issued TURs, we need to return the
		 * actual sense data without any recovery attempt.  For eh
		 * issued ones, we need to try to recover and interpret
		 */
		return SUCCESS;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return SUCCESS; /* must be no outstanding side effects */

	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF */
			return SUCCESS;

		if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
			return ADD_TO_MLQUEUE;
		if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
		    sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
			return ADD_TO_MLQUEUE;

		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry.  otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			/*
			 * Because some device does not queue unit
			 * attentions correctly, we carefully check
			 * additional sense code and qualifier so as
			 * not to squash media change unit attention.
			 */
			if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
				scmd->device->expecting_cc_ua = 0;
				return NEEDS_RETRY;
			}
		}
		/*
		 * we might also expect a cc/ua if another LUN on the target
		 * reported a UA with an ASC/ASCQ of 3F 0E -
		 * REPORTED LUNS DATA HAS CHANGED.
		 */
		if (scmd->device->sdev_target->expecting_lun_change &&
		    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
			return NEEDS_RETRY;
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		/*
		 * Pass the UA upwards for a determination in the completion
		 * functions.
		 */
		return SUCCESS;

		/* these are not supported */
	case DATA_PROTECT:
		if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
			/* Thin provisioning hard threshold reached */
			set_host_byte(scmd, DID_ALLOC_FAILURE);
			return SUCCESS;
		}
		fallthrough;
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
	case BLANK_CHECK:
		set_host_byte(scmd, DID_TARGET_FAILURE);
		return SUCCESS;

	case MEDIUM_ERROR:
		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
			set_host_byte(scmd, DID_MEDIUM_ERROR);
			return SUCCESS;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			set_host_byte(scmd, DID_TARGET_FAILURE);
		fallthrough;

	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x20 || /* Invalid command operation code */
		    sshdr.asc == 0x21 || /* Logical block address out of range */
		    sshdr.asc == 0x22 || /* Invalid function */
		    sshdr.asc == 0x24 || /* Invalid field in cdb */
		    sshdr.asc == 0x26 || /* Parameter value invalid */
		    sshdr.asc == 0x27) { /* Write protected */
			set_host_byte(scmd, DID_TARGET_FAILURE);
		}
		return SUCCESS;

	default:
		return SUCCESS;
	}
}
EXPORT_SYMBOL_GPL(scsi_check_sense);

static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth ||
	    sdev->queue_depth >= sdev->max_queue_depth)
		return;

	if (time_before(jiffies,
	    sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
		return;

	if (time_before(jiffies,
	    sdev->last_queue_full_time + sdev->queue_ramp_up_period))
		return;

	/*
	 * Walk all devices of a target and do
	 * ramp up on them.
	 */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id ||
		    tmp_sdev->queue_depth == sdev->max_queue_depth)
			continue;

		scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
		sdev->last_queue_ramp_up = jiffies;
	}
}

static void scsi_handle_queue_full(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *tmp_sdev;

	if (!sht->track_queue_depth)
		return;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->channel != sdev->channel ||
		    tmp_sdev->id != sdev->id)
			continue;
		/*
		 * We do not know the number of commands that were at
		 * the device when we got the queue full so we start
		 * from the highest possible value and work our way down.
		 */
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

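/**
 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status of commands
 *    queued during error recovery.  the main difference here is that we
 *    don't allow for the possibility of retries here, and we are a lot
 *    more restrictive about what we consider acceptable.
 */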
static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * first check the host byte, to see if there is anything in there
	 * which indicates a failure.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * rats, we are already in the error handler, so we now get to
		 * try and figure out what to do next.  if the sense is valid,
		 * we have a pretty good idea of what to do.  if not, we mark
		 * it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (get_status_byte(scmd)) {
	case SAM_STAT_GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		fallthrough;
	case SAM_STAT_COMMAND_TERMINATED:
		return SUCCESS;
	case SAM_STAT_CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_INTERMEDIATE:
	case SAM_STAT_INTERMEDIATE_CONDITION_MET:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;
	case SAM_STAT_RESERVATION_CONFLICT:
		if (scmd->cmnd[0] == TEST_UNIT_READY)
			/*
			 * it is a success, we probed the device and
			 * found it
			 */
			return SUCCESS;
		/* otherwise, we failed to send the command */
		return FAILED;
	case SAM_STAT_TASK_SET_FULL:
		scsi_handle_queue_full(scmd->device);
		fallthrough;
	case SAM_STAT_BUSY:
		return NEEDS_RETRY;
	default:
		return FAILED;
	}
	return FAILED;
}

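/**
 * scsi_eh_done - Completion function for error handling.
 * @scmd:	Cmd that is done.
 */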
void scsi_eh_done(struct scsi_cmnd *scmd)
{
	struct completion *eh_action;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s result: %x\n", __func__, scmd->result));

	eh_action = scmd->device->host->eh_action;
	if (eh_action)
		complete(eh_action);
}

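/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd:	SCSI cmd to send host reset.
 */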
static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	enum scsi_disposition rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, host, "Snd Host RST\n"));

	if (!hostt->eh_host_reset_handler)
		return FAILED;

	rtn = hostt->eh_host_reset_handler(scmd);

	if (rtn == SUCCESS) {
		if (!hostt->skip_settle_delay)
			ssleep(HOST_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

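/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd:	SCSI cmd to send bus reset.
 */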
static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	enum scsi_disposition rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s: Snd Bus RST\n", __func__));

	if (!hostt->eh_bus_reset_handler)
		return FAILED;

	rtn = hostt->eh_bus_reset_handler(scmd);

	if (rtn == SUCCESS) {
		if (!hostt->skip_settle_delay)
			ssleep(BUS_RESET_SETTLE_TIME);
		spin_lock_irqsave(host->host_lock, flags);
		scsi_report_bus_reset(host, scmd_channel(scmd));
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
{
	sdev->was_reset = 1;
	sdev->expecting_cc_ua = 1;
}

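/**
 * scsi_try_target_reset - Ask host to perform a target reset
 * @scmd:	SCSI cmd used to send a target reset
 *
 * Notes:
 *    There is no timeout for this operation.  if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */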
static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	enum scsi_disposition rtn;
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;

	if (!hostt->eh_target_reset_handler)
		return FAILED;

	rtn = hostt->eh_target_reset_handler(scmd);
	if (rtn == SUCCESS) {
		spin_lock_irqsave(host->host_lock, flags);
		__starget_for_each_device(scsi_target(scmd->device), NULL,
					  __scsi_report_device_reset);
		spin_unlock_irqrestore(host->host_lock, flags);
	}

	return rtn;
}

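/**
 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
 * @scmd:	SCSI cmd used to send BDR
 *
 * Notes:
 *    There is no timeout for this operation.  if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */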
static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
	enum scsi_disposition rtn;
	struct scsi_host_template *hostt = scmd->device->host->hostt;

	if (!hostt->eh_device_reset_handler)
		return FAILED;

	rtn = hostt->eh_device_reset_handler(scmd);
	if (rtn == SUCCESS)
		__scsi_report_device_reset(scmd->device, NULL);
	return rtn;
}

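/**
 * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
 * @hostt:	SCSI driver host template
 * @scmd:	SCSI cmd used to send a target reset
 *
 * Return value:
 *	SUCCESS, FAILED, or FAST_IO_FAIL
 *
 * Notes:
 *    SUCCESS does not necessarily indicate that the command
 *    has been aborted; it only indicates that the LLDDs
 *    has cleared all references to that command.
 *    LLDDs should return FAILED only if an abort was required
 *    but could not be executed. LLDDs should return FAST_IO_FAIL
 *    if the device is temporarily unavailable (eg due to a
 *    link down on FibreChannel)
 */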
static enum scsi_disposition
scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
{
	if (!hostt->eh_abort_handler)
		return FAILED;

	return hostt->eh_abort_handler(scmd);
}

static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
{
	if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
		if (scsi_try_bus_device_reset(scmd) != SUCCESS)
			if (scsi_try_target_reset(scmd) != SUCCESS)
				if (scsi_try_bus_reset(scmd) != SUCCESS)
					scsi_try_host_reset(scmd);
}

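/**
 * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @ses:        structure to save restore information
 * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
 * @cmnd_size:  size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE)
 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
 *
 * This function is used to save a scsi command information before re-execution
 * as part of the error recovery process.  If @sense_bytes is 0 the command
 * sent must be one that does not transfer any data.  If @sense_bytes != 0
 * @cmnd is ignored and this functions sets up a REQUEST_SENSE command
 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
 *
 * A caller pairs this with scsi_eh_restore_cmnd(), roughly (sketch):
 *
 *	struct scsi_eh_save ses;
 *
 *	scsi_eh_prep_cmnd(scmd, &ses, eh_cdb, eh_cdb_len, 0);
 *	... issue scmd and wait for it to complete ...
 *	scsi_eh_restore_cmnd(scmd, &ses);
 *
 * scsi_send_eh_cmnd() below is the canonical in-tree user of this pair.
 */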
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
			unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	ses->cmd_len = scmd->cmd_len;
	ses->data_direction = scmd->sc_data_direction;
	ses->sdb = scmd->sdb;
	ses->result = scmd->result;
	ses->resid_len = scmd->resid_len;
	ses->underflow = scmd->underflow;
	ses->prot_op = scmd->prot_op;
	ses->eh_eflags = scmd->eh_eflags;

	scmd->prot_op = SCSI_PROT_NORMAL;
	scmd->eh_eflags = 0;
	memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
	memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
	scmd->result = 0;
	scmd->resid_len = 0;

	if (sense_bytes) {
		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
					 sense_bytes);
		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
			    scmd->sdb.length);
		scmd->sdb.table.sgl = &ses->sense_sgl;
		scmd->sc_data_direction = DMA_FROM_DEVICE;
		scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
		scmd->cmnd[0] = REQUEST_SENSE;
		scmd->cmnd[4] = scmd->sdb.length;
		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	} else {
		scmd->sc_data_direction = DMA_NONE;
		if (cmnd) {
			BUG_ON(cmnd_size > sizeof(scmd->cmnd));
			memcpy(scmd->cmnd, cmnd, cmnd_size);
			scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
		}
	}

	scmd->underflow = 0;

	if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	/*
	 * Zero the sense buffer.  The scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_eh_prep_cmnd);

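/**
 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
 * @scmd:       SCSI command structure to restore
 * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
 *
 * Undo any changes done by scsi_eh_prep_cmnd().
 */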
void scsi_eh_restore_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses)
{
	/*
	 * Restore original data
	 */
	scmd->cmd_len = ses->cmd_len;
	memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
	scmd->sc_data_direction = ses->data_direction;
	scmd->sdb = ses->sdb;
	scmd->result = ses->result;
	scmd->resid_len = ses->resid_len;
	scmd->underflow = ses->underflow;
	scmd->prot_op = ses->prot_op;
	scmd->eh_eflags = ses->eh_eflags;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);

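/**
 * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
 * @scmd:       SCSI command structure to hijack
 * @cmnd:       CDB to send
 * @cmnd_size:  size in bytes of @cmnd
 * @timeout:    timeout for this request
 * @sense_bytes: size of sense data to copy or 0
 *
 * This function is used to send a scsi command down to a target device
 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY
 */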
static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
	unsigned char *cmnd, int cmnd_size, int timeout, unsigned sense_bytes)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long timeleft = timeout, delay;
	struct scsi_eh_save ses;
	const unsigned long stall_for = msecs_to_jiffies(100);
	int rtn;

retry:
	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
	shost->eh_action = &done;

	scsi_log_send(scmd);
	scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;

	/*
	 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
	 * change the SCSI device state after we have examined it and before
	 * .queuecommand() is called.
	 */
	mutex_lock(&sdev->state_mutex);
	while (sdev->sdev_state == SDEV_BLOCK && timeleft > 0) {
		mutex_unlock(&sdev->state_mutex);
		SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_DEBUG, sdev,
			"%s: state %d <> %d\n", __func__, sdev->sdev_state,
			SDEV_BLOCK));
		delay = min(timeleft, stall_for);
		timeleft -= delay;
		msleep(jiffies_to_msecs(delay));
		mutex_lock(&sdev->state_mutex);
	}
	if (sdev->sdev_state != SDEV_BLOCK)
		rtn = shost->hostt->queuecommand(shost, scmd);
	else
		rtn = FAILED;
	mutex_unlock(&sdev->state_mutex);

	if (rtn) {
		if (timeleft > stall_for) {
			scsi_eh_restore_cmnd(scmd, &ses);

			timeleft -= stall_for;
			msleep(jiffies_to_msecs(stall_for));
			goto retry;
		}
		/* signal not to enter either branch of the if () below */
		timeleft = 0;
		rtn = FAILED;
	} else {
		timeleft = wait_for_completion_timeout(&done, timeout);
		rtn = SUCCESS;
	}

	shost->eh_action = NULL;

	scsi_log_completion(scmd, rtn);

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s timeleft: %ld\n",
			__func__, timeleft));

	/*
	 * If there is time left scsi_eh_done got called, and we will examine
	 * the actual status codes to see whether the command actually did
	 * complete normally, else if we have a zero return and no time left,
	 * the command must still be pending, so abort it and return FAILED.
	 * If we never actually managed to issue the command, because
	 * ->queuecommand() kept returning non zero, use the rtn = FAILED
	 * above to make sure not to call scsi_abort_eh_cmnd().
	 */
	if (timeleft) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s: scsi_eh_completed_normally %x\n", __func__, rtn));

		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		case ADD_TO_MLQUEUE:
			rtn = NEEDS_RETRY;
			break;
		default:
			rtn = FAILED;
			break;
		}
	} else if (rtn != FAILED) {
		scsi_abort_eh_cmnd(scmd);
		rtn = FAILED;
	}

	scsi_eh_restore_cmnd(scmd, &ses);

	return rtn;
}

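/**
 * scsi_request_sense - Request sense data from a particular target.
 * @scmd:	SCSI cmd for request sense.
 *
 * Notes:
 *    Some hosts automatically obtain this information, others require
 *    that we obtain it on our own. This function will *not* return until
 *    the command either times out, or it completes.
 */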
static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
{
	return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}

static enum scsi_disposition
scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
{
	if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);

		if (sdrv->eh_action)
			rtn = sdrv->eh_action(scmd, rtn);
	}
	return rtn;
}

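/**
 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
 * @scmd:	Original SCSI cmd that eh has finished.
 * @done_q:	Queue for processed commands.
 *
 * Notes:
 *    We don't want to use the normal command completion while we are
 *    still performing recovery actions, since these procedures may
 *    cause the request queues to start running again.  Thus we keep a
 *    list of pending commands for final completion, and once we are
 *    ready to leave error handling we handle completion for real.
 */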
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
	list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);

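/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  if so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, we end up shutting
 *    it down before we request it.
 */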
int scsi_eh_get_sense(struct list_head *work_q,
		      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	struct Scsi_Host *shost;
	enum scsi_disposition rtn;

	/*
	 * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
	 * should not get sense.
	 */
	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		shost = scmd->device->host;
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: skip request sense, past eh deadline\n",
					    current->comm));
			break;
		}
		if (!scsi_status_is_check_condition(scmd->result))
			/*
			 * don't request sense if there's no check condition
			 * status because the error we're processing isn't one
			 * that has a sense code (and some devices get
			 * confused by sense requests out of the blue)
			 */
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"sense requested, result %x\n", scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS) {
			/*
			 * We don't want this command reissued, just finished
			 * with the sense data, so set retries to the max
			 * allowed to ensure it won't get reissued. If the user
			 * has requested infinite retries, we also want this to
			 * be constrained to SUCCESS here.
			 */
			if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
				scmd->retries = scmd->allowed = 1;
			else
				scmd->retries = scmd->allowed;
		} else if (rtn != NEEDS_RETRY) {
			continue;
		}

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_get_sense);

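/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	&scsi_cmnd to send TUR
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */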
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1;
	enum scsi_disposition rtn;

retry_tur:
	rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
				scmd->device->eh_timeout, 0);

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s return: %x\n", __func__, rtn));

	switch (rtn) {
	case NEEDS_RETRY:
		if (retry_cnt--)
			goto retry_tur;
		fallthrough;
	case SUCCESS:
		return 0;
	default:
		return 1;
	}
}

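/**
 * scsi_eh_test_devices - check if devices are responding from error recovery.
 * @cmd_list:	scsi commands in error recovery.
 * @work_q:	queue for commands which still need more error recovery
 * @done_q:	queue for commands which are finished
 * @try_stu:	boolean on if a STU command should be tried in addition to TUR.
 *
 * Description:
 *    Tests if devices are in a working state.  Commands to devices now in
 *    a working state are sent to the done_q while commands to devices which
 *    are still failing to respond are returned to the work_q for more
 *    processing.
 */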
static int scsi_eh_test_devices(struct list_head *cmd_list,
				struct list_head *work_q,
				struct list_head *done_q, int try_stu)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;
	int finish_cmds;

	while (!list_empty(cmd_list)) {
		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
		sdev = scmd->device;

		if (!try_stu) {
			if (scsi_host_eh_past_deadline(sdev->host)) {
				/* Push items back onto work_q */
				list_splice_init(cmd_list, work_q);
				SCSI_LOG_ERROR_RECOVERY(3,
					sdev_printk(KERN_INFO, sdev,
						    "%s: skip test device, past eh deadline",
						    current->comm));
				break;
			}
		}

		finish_cmds = !scsi_device_online(scmd->device) ||
			(try_stu && !scsi_eh_try_stu(scmd) &&
			 !scsi_eh_tur(scmd)) ||
			!scsi_eh_tur(scmd);

		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
			if (scmd->device == sdev) {
				if (finish_cmds &&
				    (try_stu ||
				     scsi_eh_action(scmd, SUCCESS) == SUCCESS))
					scsi_eh_finish_cmd(scmd, done_q);
				else
					list_move_tail(&scmd->eh_entry, work_q);
			}
	}
	return list_empty(work_q);
}

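/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	&scsi_cmnd to send START_UNIT
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 */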
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};

	if (scmd->device->allow_restart) {
		int i;
		enum scsi_disposition rtn = NEEDS_RETRY;

		for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
			rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
						scmd->device->eh_timeout, 0);

		if (rtn == SUCCESS)
			return 0;
	}

	return 1;
}

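/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost:	&scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 */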
static int scsi_eh_stu(struct Scsi_Host *shost,
		       struct list_head *work_q,
		       struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: skip START_UNIT, past eh deadline\n",
					    current->comm));
			scsi_device_put(sdev);
			break;
		}
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				    "%s: Sending START_UNIT\n",
				    current->comm));

		if (!scsi_eh_try_stu(stu_scmd)) {
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, SUCCESS) == SUCCESS)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: START_UNIT failed\n",
					    current->comm));
		}
	}

	return list_empty(work_q);
}

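/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost:	scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  Still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 */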
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	enum scsi_disposition rtn;

	shost_for_each_device(sdev, shost) {
		if (scsi_host_eh_past_deadline(shost)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: skip BDR, past eh deadline\n",
					    current->comm));
			scsi_device_put(sdev);
			break;
		}
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3,
			sdev_printk(KERN_INFO, sdev,
				    "%s: Sending BDR\n", current->comm));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			if (!scsi_device_online(sdev) ||
			    rtn == FAST_IO_FAIL ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev &&
					    scsi_eh_action(scmd, rtn) != FAILED)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: BDR failed\n", current->comm));
		}
	}

	return list_empty(work_q);
}

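/**
 * scsi_eh_target_reset - send target reset if needed
 * @shost:	scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 *
 * Notes:
 *    Try a target reset.
 */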
static int scsi_eh_target_reset(struct Scsi_Host *shost,
				struct list_head *work_q,
				struct list_head *done_q)
{
	LIST_HEAD(tmp_list);
	LIST_HEAD(check_list);

	list_splice_init(work_q, &tmp_list);

	while (!list_empty(&tmp_list)) {
		struct scsi_cmnd *next, *scmd;
		enum scsi_disposition rtn;
		unsigned int id;

		if (scsi_host_eh_past_deadline(shost)) {
			/* push back on work queue for further processing */
			list_splice_init(&check_list, work_q);
			list_splice_init(&tmp_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: Skip target reset, past eh deadline\n",
					    current->comm));
			return list_empty(work_q);
		}

		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
		id = scmd_id(scmd);

		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending target reset to target %d\n",
				     current->comm, id));
		rtn = scsi_try_target_reset(scmd);
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: Target reset failed"
					     " target: %d\n",
					     current->comm, id));
		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
			if (scmd_id(scmd) != id)
				continue;

			if (rtn == SUCCESS)
				list_move_tail(&scmd->eh_entry, &check_list);
			else if (rtn == FAST_IO_FAIL)
				scsi_eh_finish_cmd(scmd, done_q);
			else
				/* push back on work queue for further processing */
				list_move(&scmd->eh_entry, work_q);
		}
	}

	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}

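/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost:	&scsi host being recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */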
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	LIST_HEAD(check_list);
	unsigned int channel;
	enum scsi_disposition rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		if (scsi_host_eh_past_deadline(shost)) {
			list_splice_init(&check_list, work_q);
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					    "%s: skip BRST, past eh deadline\n",
					    current->comm));
			return list_empty(work_q);
		}

		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending BRST chan: %d\n",
				     current->comm, channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd)) {
					if (rtn == FAST_IO_FAIL)
						scsi_eh_finish_cmd(scmd,
								   done_q);
					else
						list_move_tail(&scmd->eh_entry,
							       &check_list);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: BRST failed chan: %d\n",
					     current->comm, channel));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
}

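/**
 * scsi_eh_host_reset - send a host reset
 * @shost:	host to be reset.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */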
static int scsi_eh_host_reset(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(check_list);
	enum scsi_disposition rtn;

	if (!list_empty(work_q)) {
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				     "%s: Sending HRST\n",
				     current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_splice_init(work_q, &check_list);
		} else if (rtn == FAST_IO_FAIL) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				shost_printk(KERN_INFO, shost,
					     "%s: HRST failed\n",
					     current->comm));
		}
	}
	return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
}

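/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */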
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	struct scsi_device *sdev;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
			    "not ready after error recovery\n");
		sdev = scmd->device;

		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);

		scsi_eh_finish_cmd(scmd, done_q);
	}
}

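/**
 * scsi_noretry_cmd - determine if command should be failed fast
 * @scmd:	SCSI cmd to examine.
 */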
bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
	struct request *req = scsi_cmd_to_rq(scmd);

	switch (host_byte(scmd->result)) {
	case DID_OK:
		break;
	case DID_TIME_OUT:
		goto check_type;
	case DID_BUS_BUSY:
		return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
	case DID_PARITY:
		return !!(req->cmd_flags & REQ_FAILFAST_DEV);
	case DID_ERROR:
		if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
			return false;
		fallthrough;
	case DID_SOFT_ERROR:
		return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
	}

	if (!scsi_status_is_check_condition(scmd->result))
		return false;

check_type:
	/*
	 * assume caller has checked sense and determined
	 * the check condition was retryable.
	 */
	if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
		return true;

	return false;
}

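/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd:	SCSI cmd to examine.
 *
 * Return value:
 *	SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual command.  Any commands that are queued for error
 *    recovery (e.g. TEST_UNIT_READY) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 */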
enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	enum scsi_disposition rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
			"%s: device offline - report as SUCCESS\n", __func__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * which indicates a failure.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_ABORT:
		if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
			set_host_byte(scmd, DID_TIME_OUT);
			return SUCCESS;
		}
		fallthrough;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
	case DID_SOFT_ERROR:
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 */
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;
	case DID_TRANSPORT_DISRUPTED:
		/*
		 * LLD/transport was disrupted during processing of the IO.
		 * The transport class is now blocked/blocking,
		 * and the transport will decide what to do with the IO
		 * based on its timers and recovery capabilities if
		 * there are enough retries to allow.
		 */
		goto maybe_retry;
	case DID_TRANSPORT_FAILFAST:
		/*
		 * The transport decided to failfast the IO (most likely
		 * the fast io fail tmo fired), so send IO directly upwards.
		 */
		return SUCCESS;
	case DID_TRANSPORT_MARGINAL:
		/*
		 * caller has decided not to do retries on
		 * commands in a marginal state.
		 */
		return SUCCESS;
	case DID_ERROR:
		if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		fallthrough;
	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (get_status_byte(scmd)) {
	case SAM_STAT_TASK_SET_FULL:
		scsi_handle_queue_full(scmd->device);
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
		fallthrough;
	case SAM_STAT_BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case SAM_STAT_GOOD:
		if (scmd->cmnd[0] == REPORT_LUNS)
			scmd->device->sdev_target->expecting_lun_change = 0;
		scsi_handle_queue_ramp_up(scmd->device);
		fallthrough;
	case SAM_STAT_COMMAND_TERMINATED:
		return SUCCESS;
	case SAM_STAT_TASK_ABORTED:
		goto maybe_retry;
	case SAM_STAT_CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_INTERMEDIATE:
	case SAM_STAT_INTERMEDIATE_CONDITION_MET:
	case SAM_STAT_ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case SAM_STAT_RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		set_host_byte(scmd, DID_NEXUS_FAILURE);
		return SUCCESS; /* causes immediate i/o error */
	}
	return FAILED;

maybe_retry:

	/*
	 * we only come in here if we want to retry a command.  the test to
	 * see whether the command should be retried should be keeping track
	 * of the number of tries, so we don't end up looping, of course.
	 */
	if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}

static void eh_lock_door_done(struct request *req, blk_status_t status)
{
	blk_mq_free_request(req);
}

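/**
 * scsi_eh_lock_door - Prevent medium removal for the specified device
 * @sdev:	SCSI device to prevent medium removal
 *
 * Locking:
 * 	We must be called from process context.
 *
 * Notes:
 * 	We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 * 	head of the devices request queue, and continue.
 */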
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
	struct scsi_cmnd *scmd;
	struct request *req;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return;
	scmd = blk_mq_rq_to_pdu(req);

	scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL;
	scmd->cmnd[1] = 0;
	scmd->cmnd[2] = 0;
	scmd->cmnd[3] = 0;
	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
	scmd->cmnd[5] = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->allowed = 5;

	req->rq_flags |= RQF_QUIET;
	req->timeout = 10 * HZ;
	req->end_io = eh_lock_door_done;

	blk_execute_rq_nowait(req, true);
}

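/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost:	Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 */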
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
			scsi_eh_lock_door(sdev);
			sdev->was_reset = 0;
		}
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost, "waking up host to restart\n"));

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to re-initiate these
	 * requests.
	 */
	scsi_run_host_queues(shost);

	/*
	 * if eh is active and host_eh_scheduled is pending we need to re-run
	 * recovery.  we do this check after scsi_run_host_queues() to allow
	 * everything pent up since the last eh run a chance to make forward
	 * progress before we sync again.  Either we'll immediately re-run
	 * recovery or scsi_device_unbusy() will wake up the error handler.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->host_eh_scheduled)
		if (scsi_host_set_state(shost, SHOST_RECOVERY))
			WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);
}

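/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost:	host to be recovered.
 * @work_q:	&list_head for pending commands.
 * @done_q:	&list_head for processed commands.
 */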
void scsi_eh_ready_devs(struct Scsi_Host *shost,
			struct list_head *work_q,
			struct list_head *done_q)
{
	if (!scsi_eh_stu(shost, work_q, done_q))
		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
			if (!scsi_eh_target_reset(shost, work_q, done_q))
				if (!scsi_eh_bus_reset(shost, work_q, done_q))
					if (!scsi_eh_host_reset(shost, work_q, done_q))
						scsi_eh_offline_sdevs(work_q,
								      done_q);
}
EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);

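/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q:	list_head of processed commands.
 */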
void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
		    scsi_eh_should_retry_cmd(scmd)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush retry cmd\n",
					    current->comm));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DID_TIME_OUT.
			 */
			if (!scmd->result)
				scmd->result |= (DID_TIME_OUT << 16);
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "%s: flush finish cmd\n",
					    current->comm));
			scsi_finish_command(scmd);
		}
	}
}
EXPORT_SYMBOL(scsi_eh_flush_done_q);

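/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost:	Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  we also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation - we can override this
 *    function through the scsi host template.
 */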
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));

	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->eh_deadline != -1)
		shost->last_reset = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);
	scsi_eh_flush_done_q(&eh_done_q);
}

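/**
 * scsi_error_handler - SCSI error handler thread
 * @data:	Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 */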
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	while (true) {
		/*
		 * The sequence in kthread_stop() sets the stop flag first
		 * then wakes the process.  To avoid missed wakeups, the task
		 * should always be in a non running state before the stop
		 * flag is checked
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
		    shost->host_failed != scsi_host_busy(shost)) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_INFO, shost,
					     "scsi_eh_%d: sleeping\n",
					     shost->host_no));
			schedule();
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			shost_printk(KERN_INFO, shost,
				     "scsi_eh_%d: waking up %d/%d/%d\n",
				     shost->host_no, shost->host_eh_scheduled,
				     shost->host_failed,
				     scsi_host_busy(shost)));

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
			SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "scsi_eh_%d: unable to autoresume\n",
					     shost->host_no));
			continue;
		}

		if (shost->transportt->eh_strategy_handler)
			shost->transportt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/* All scmds have been handled */
		shost->host_failed = 0;

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		if (!shost->eh_noresume)
			scsi_autopm_put_host(shost);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		shost_printk(KERN_INFO, shost,
			     "Error handler scsi_eh_%d exiting\n",
			     shost->host_no));
	shost->ehandler = NULL;
	return 0;
}

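/*
 * Function:	scsi_report_bus_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a bus reset on the bus being handled.
 *
 * Arguments:	shost       - Host in question
 *		channel     - channel on which reset was observed.
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held.
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a CHECK_CONDITION
 *		is properly treated.
 */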
void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev_channel(sdev))
			__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_bus_reset);

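/*
 * Function:	scsi_report_device_reset()
 *
 * Purpose:	Utility function used by low-level drivers to report that
 *		they have observed a device reset on the device being handled.
 *
 * Arguments:	shost       - Host in question
 *		channel     - channel on which reset was observed
 *		target	    - target on which reset was observed
 *
 * Returns:	Nothing
 *
 * Lock status:	Host lock must be held
 *
 * Notes:	This only needs to be called if the reset is one which
 *		originates from an unknown location.  Resets originated
 *		by the mid-level itself don't need to call this, but there
 *		should be no harm.
 *
 *		The main purpose of this is to make sure that a CHECK_CONDITION
 *		is properly treated.
 */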
void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
{
	struct scsi_device *sdev;

	__shost_for_each_device(sdev, shost) {
		if (channel == sdev_channel(sdev) &&
		    target == sdev_id(sdev))
			__scsi_report_device_reset(sdev, NULL);
	}
}
EXPORT_SYMBOL(scsi_report_device_reset);

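/**
 * scsi_ioctl_reset: explicitly reset a host/bus/target/device
 * @dev:	scsi_device to operate on
 * @arg:	reset type (see sg.h)
 */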
int
scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
	struct scsi_cmnd *scmd;
	struct Scsi_Host *shost = dev->host;
	struct request *rq;
	unsigned long flags;
	int error = 0, val;
	enum scsi_disposition rtn;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	error = get_user(val, arg);
	if (error)
		return error;

	if (scsi_autopm_get_host(shost) < 0)
		return -EIO;

	error = -EIO;
	rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size, GFP_KERNEL);
	if (!rq)
		goto out_put_autopm_host;
	blk_rq_init(NULL, rq);

	scmd = (struct scsi_cmnd *)(rq + 1);
	scsi_init_command(dev, scmd);

	scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
	memset(&scmd->sdb, 0, sizeof(scmd->sdb));

	scmd->cmd_len = 0;

	scmd->sc_data_direction = DMA_BIDIRECTIONAL;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);

	switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
	case SG_SCSI_RESET_NOTHING:
		rtn = SUCCESS;
		break;
	case SG_SCSI_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		fallthrough;
	case SG_SCSI_RESET_TARGET:
		rtn = scsi_try_target_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		fallthrough;
	case SG_SCSI_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
			break;
		fallthrough;
	case SG_SCSI_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS)
			break;
		fallthrough;
	default:
		rtn = FAILED;
		break;
	}

	error = (rtn == SUCCESS) ? 0 : -EIO;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->tmf_in_progress = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * be sure to wake up anyone who was sleeping or had their queue
	 * suspended while we were doing the TMF
	 */
	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, shost,
			     "waking up host to restart after TMF\n"));

	wake_up(&shost->host_wait);
	scsi_run_host_queues(shost);

	kfree(rq);

out_put_autopm_host:
	scsi_autopm_put_host(shost);
	return error;
}

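/**
 * scsi_command_normalize_sense - normalize main elements from either fixed or
 *			descriptor sense data format into a common format.
 *
 * @cmd:	Command that contains the sense data.
 * @sshdr:	Place to put the normalized sense data.
 *
 * Return value:
 *	true if normalized, i.e. valid sense data; else false
 *
 * Notes:
 *	The "main elements" from sense data are: response_code, sense_key,
 *	asc, ascq and additional_length (only for descriptor format).
 */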
bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
				  struct scsi_sense_hdr *sshdr)
{
	return scsi_normalize_sense(cmd->sense_buffer,
			SCSI_SENSE_BUFFERSIZE, sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);

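/**
 * scsi_get_sense_info_fld - get information field from sense data (either
 *			     fixed or descriptor format)
 * @sense_buffer:	byte array of sense data
 * @sb_len:		number of valid bytes in sense_buffer
 * @info_out:		pointer to 64 integer where 8 or 4 byte information
 *			field will be placed if found.
 *
 * Return value:
 *	true if information field found, false if not found.
 */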
bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
			     u64 *info_out)
{
	const u8 *ucp;

	if (sb_len < 7)
		return false;
	switch (sense_buffer[0] & 0x7f) {
	case 0x70:
	case 0x71:
		if (sense_buffer[0] & 0x80) {
			*info_out = get_unaligned_be32(&sense_buffer[3]);
			return true;
		}
		return false;
	case 0x72:
	case 0x73:
		ucp = scsi_sense_desc_find(sense_buffer, sb_len,
					   0 /* info desc */);
		if (ucp && (0xa == ucp[1])) {
			*info_out = get_unaligned_be64(&ucp[4]);
			return true;
		}
		return false;
	default:
		return false;
	}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);