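/*
 * scsi.c - SCSI mid-layer core.
 *
 * Shared infrastructure used by SCSI host adapter and upper-level drivers:
 * command completion and logging, queue-depth management, VPD (Vital
 * Product Data) page caching, and scsi_device reference counting and
 * lookup helpers.
 */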
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>
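
/*
 * scsi_logging_level is a bit mask of per-facility log levels; the
 * SCSI_LOG_* helpers in scsi_logging.h extract the relevant field.
 * It is adjustable at runtime via the scsi_logging_level module parameter.
 */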
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;

        /*
         * At MLQUEUE level 2 and above, log the scsi_cmnd address and print
         * the CDB of every command as it is handed to the low-level driver.
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        scmd_printk(KERN_INFO, cmd,
                                    "Send: scmd 0x%p\n", cmd);
                        scsi_print_command(cmd);
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;

        /*
         * At MLCOMPLETE level 1, log only commands that did not complete
         * successfully; at level 2 and above, log every completion; at
         * level 4 and above, also dump the host busy/failed counters.
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        scsi_print_result(cmd, "Done", disposition);
                        scsi_print_command(cmd);
                        if (scsi_status_is_check_condition(cmd->result))
                                scsi_print_sense(cmd);
                        if (level > 3)
                                scmd_printk(KERN_INFO, cmd,
                                            "scsi host busy %d failed %d\n",
                                            scsi_host_busy(cmd->device->host),
                                            cmd->device->host->host_failed);
                }
        }
}
#endif
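
/**
 * scsi_finish_command - mid-level completion of a SCSI command
 * @cmd: the command that has completed
 *
 * Marks the device as no longer busy, clears any blocked state on the
 * host/target/device, lets the upper-level driver adjust the count of
 * successfully transferred bytes, and then hands the command to
 * scsi_io_completion().
 */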
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
        struct scsi_driver *drv;
        unsigned int good_bytes;

        scsi_device_unbusy(sdev, cmd);

        /*
         * Clear the blocked counters: a completed command means the device,
         * target and host are all able to accept new commands again.
         */
        if (atomic_read(&shost->host_blocked))
                atomic_set(&shost->host_blocked, 0);
        if (atomic_read(&starget->target_blocked))
                atomic_set(&starget->target_blocked, 0);
        if (atomic_read(&sdev->device_blocked))
                atomic_set(&sdev->device_blocked, 0);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        good_bytes = scsi_bufflen(cmd);
        if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
                int old_good_bytes = good_bytes;
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
                        good_bytes = drv->done(cmd);
                /*
                 * If the upper-level driver's ->done() did not adjust the
                 * byte count itself, fall back to subtracting the residual
                 * reported by the low-level driver.
                 */
                if (good_bytes == old_good_bytes)
                        good_bytes -= scsi_get_resid(cmd);
        }
        scsi_io_completion(cmd, good_bytes);
}
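
/*
 * Upper bound for the per-device queue depth: the host's can_queue, capped
 * at 4096, which is plenty to keep a single LUN busy.
 */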
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
        return min_t(int, sdev->host->can_queue, 4096);
}
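
/**
 * scsi_change_queue_depth - change a device's queue depth
 * @sdev:  SCSI device whose queue depth is to change
 * @depth: requested number of commands that may be queued
 *
 * The depth is clamped to scsi_device_max_queue_depth() and propagated to
 * the block layer queue and the budget map.
 *
 * Returns the new queue depth.
 */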
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
        depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));

        if (depth > 0) {
                sdev->queue_depth = depth;
                wmb();
        }

        if (sdev->request_queue)
                blk_set_queue_depth(sdev->request_queue, depth);

        sbitmap_resize(&sdev->budget_map, sdev->queue_depth);

        return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
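
/**
 * scsi_track_queue_full - track QUEUE FULL events to adjust the queue depth
 * @sdev:  SCSI device that reported QUEUE FULL
 * @depth: number of commands outstanding when QUEUE FULL was reported
 *
 * Call this each time a device returns QUEUE FULL. Once the same depth has
 * been reported more than ten times (ignoring reports that arrive within
 * the same 16-jiffy window), the queue depth is changed to @depth.
 *
 * Returns 0 if the depth was left alone, otherwise the new queue depth.
 */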
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        /*
         * Ignore QUEUE FULL events that arrive within the same 16-jiffy
         * window; they are most likely caused by a single burst.
         */
        if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
                return 0;

        sdev->last_queue_full_time = jiffies;
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        /* Only adjust the depth once the same depth has repeated often. */
        if (sdev->last_queue_full_count <= 10)
                return 0;

        return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
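
/**
 * scsi_vpd_inquiry - issue an INQUIRY for a single VPD page
 * @sdev:   device to query
 * @buffer: buffer that receives the VPD page
 * @page:   VPD page number to request
 * @len:    size of @buffer in bytes (must be at least 4)
 *
 * Returns the full size of the page as reported by the device (which may
 * exceed @len), or a negative errno on failure.
 */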
static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
                            u8 page, unsigned len)
{
        int result;
        unsigned char cmd[16];

        if (len < 4)
                return -EINVAL;

        cmd[0] = INQUIRY;
        cmd[1] = 1;             /* EVPD */
        cmd[2] = page;
        cmd[3] = len >> 8;
        cmd[4] = len & 0xff;
        cmd[5] = 0;             /* control byte */

        /* Allow up to three retries with a generous 30 second timeout. */
        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
                                  len, NULL, 30 * HZ, 3, NULL);
        if (result)
                return -EIO;

        /* Sanity check that we got back the page we asked for. */
        if (buffer[1] != page)
                return -EIO;

        return get_unaligned_be16(&buffer[2]) + 4;
}
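
/*
 * Return the size of the requested VPD page, or 0 if the size could not be
 * determined.
 */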
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
        unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
        int result;

        /*
         * Fetch just the page header to find out how large the full page
         * is; the caller can then allocate an appropriately sized buffer.
         */
        result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
        if (result < 0)
                return 0;

        if (result < SCSI_VPD_HEADER_SIZE) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: short VPD page 0x%02x length: %d bytes\n",
                              __func__, page, result);
                return 0;
        }

        return result;
}
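
/**
 * scsi_get_vpd_page - fetch a VPD page into a caller-supplied buffer
 * @sdev:    device to query
 * @page:    VPD page number to request
 * @buf:     where to store the page
 * @buf_len: size of @buf in bytes
 *
 * The page is truncated to @buf_len if the device reports a larger size.
 *
 * Returns 0 on success or -EINVAL if the device does not support VPD or
 * the page could not be read.
 */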
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
                      int buf_len)
{
        int result, vpd_len;

        if (!scsi_device_supports_vpd(sdev))
                return -EINVAL;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return -EINVAL;

        vpd_len = min(vpd_len, buf_len);

        /*
         * Fetch the whole page, clipped to the caller's buffer. The device
         * already reported its size above, so warn if it now claims more.
         */
        memset(buf, 0, buf_len);
        result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
        if (result < 0)
                return -EINVAL;
        else if (result > vpd_len)
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);

        return 0;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
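
/*
 * Allocate and return a struct scsi_vpd holding the requested VPD page, or
 * NULL if the page could not be read. The caller owns the returned buffer.
 */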
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
        struct scsi_vpd *vpd_buf;
        int vpd_len, result;

        vpd_len = scsi_get_vpd_size(sdev, page);
        if (vpd_len <= 0)
                return NULL;

retry_pg:
        /*
         * Allocate a buffer for the reported size and fetch the page. If
         * the device then claims the page is larger, retry with the larger
         * size.
         */
        vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
        if (!vpd_buf)
                return NULL;

        result = scsi_vpd_inquiry(sdev, vpd_buf->data, page, vpd_len);
        if (result < 0) {
                kfree(vpd_buf);
                return NULL;
        }
        if (result > vpd_len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: VPD page 0x%02x result %d > %d bytes\n",
                              __func__, page, result, vpd_len);
                vpd_len = result;
                kfree(vpd_buf);
                goto retry_pg;
        }

        vpd_buf->len = result;

        return vpd_buf;
}

/* Replace the cached copy of a VPD page and free the old one via RCU. */
static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page,
                                 struct scsi_vpd __rcu **sdev_vpd_buf)
{
        struct scsi_vpd *vpd_buf;

        vpd_buf = scsi_get_vpd_buf(sdev, page);
        if (!vpd_buf)
                return;

        mutex_lock(&sdev->inquiry_mutex);
        vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf,
                                      lockdep_is_held(&sdev->inquiry_mutex));
        mutex_unlock(&sdev->inquiry_mutex);

        if (vpd_buf)
                kfree_rcu(vpd_buf, rcu);
}
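
/**
 * scsi_attach_vpd - cache the VPD pages the SCSI core cares about
 * @sdev: device whose VPD pages should be (re)read
 *
 * Reads VPD page 0x00 (the list of supported pages) and refreshes the
 * cached copies of pages 0x00, 0x80, 0x83, 0x89, 0xb0, 0xb1 and 0xb2 that
 * hang off the scsi_device.
 */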
void scsi_attach_vpd(struct scsi_device *sdev)
{
        int i;
        struct scsi_vpd *vpd_buf;

        if (!scsi_device_supports_vpd(sdev))
                return;

        /* Ask for page 0x00 first: the list of all supported VPD pages. */
        vpd_buf = scsi_get_vpd_buf(sdev, 0);
        if (!vpd_buf)
                return;

        for (i = 4; i < vpd_buf->len; i++) {
                if (vpd_buf->data[i] == 0x0)
                        scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
                if (vpd_buf->data[i] == 0x80)
                        scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
                if (vpd_buf->data[i] == 0x83)
                        scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
                if (vpd_buf->data[i] == 0x89)
                        scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
                if (vpd_buf->data[i] == 0xb0)
                        scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
                if (vpd_buf->data[i] == 0xb1)
                        scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
                if (vpd_buf->data[i] == 0xb2)
                        scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
        }
        kfree(vpd_buf);
}
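
/**
 * scsi_report_opcode - ask a device whether it supports a given command
 * @sdev:   device to query
 * @buffer: scratch buffer for the REPORT SUPPORTED OPERATION CODES response
 * @len:    size of @buffer in bytes
 * @opcode: command opcode to ask about
 *
 * Uses MAINTENANCE IN / REPORT SUPPORTED OPERATION CODES.
 *
 * Returns 1 if the device reports the opcode as supported, 0 if it is not
 * supported or support could not be determined, and a negative errno or
 * -EINVAL if the device cannot answer the question at all.
 */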
int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
                       unsigned int len, unsigned char opcode)
{
        unsigned char cmd[16];
        struct scsi_sense_hdr sshdr;
        int result, request_len;

        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
                return -EINVAL;

        /* RSOC header plus the size of the command we are asking about. */
        request_len = 4 + COMMAND_SIZE(opcode);
        if (request_len > len) {
                dev_warn_once(&sdev->sdev_gendev,
                              "%s: len %u bytes, opcode 0x%02x needs %u\n",
                              __func__, len, opcode, request_len);
                return -EINVAL;
        }

        memset(cmd, 0, 16);
        cmd[0] = MAINTENANCE_IN;
        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
        cmd[2] = 1;             /* reporting options: one command format */
        cmd[3] = opcode;
        put_unaligned_be32(request_len, &cmd[6]);
        memset(buffer, 0, len);

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
                                  request_len, &sshdr, 30 * HZ, 3, NULL);

        if (result < 0)
                return result;
        /* ILLEGAL REQUEST means the device does not implement RSOC. */
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
                return -EINVAL;

        /* SUPPORT field value 0x03 means the opcode is supported. */
        if ((buffer[1] & 3) == 3)
                return 1;

        return 0;
}
EXPORT_SYMBOL(scsi_report_opcode);
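
/**
 * scsi_device_get - take an additional reference on a scsi_device
 * @sdev: device to reference
 *
 * Pins both the sdev_gendev and the low-level driver module so that the
 * device cannot go away while the caller is using it.
 *
 * Returns 0 on success or -ENXIO if the device is being deleted or
 * cancelled, or if the references could not be taken.
 */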
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                goto fail;
        if (!get_device(&sdev->sdev_gendev))
                goto fail;
        if (!try_module_get(sdev->host->hostt->module))
                goto fail_put_device;
        return 0;

fail_put_device:
        put_device(&sdev->sdev_gendev);
fail:
        return -ENXIO;
}
EXPORT_SYMBOL(scsi_device_get);
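
/**
 * scsi_device_put - release a reference taken with scsi_device_get()
 * @sdev: device to release
 *
 * Drops the device reference and the low-level driver module reference.
 */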
void scsi_device_put(struct scsi_device *sdev)
{
        struct module *mod = sdev->host->hostt->module;

        put_device(&sdev->sdev_gendev);
        module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
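
/* Iteration helper used by the shost_for_each_device() macro. */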
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
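
/**
 * starget_for_each_device - call a function on each device of a target
 * @starget: target whose devices to iterate
 * @data:    opaque cookie passed to @fn
 * @fn:      callback invoked for each scsi_device on the target
 *
 * Takes a reference on each device while @fn runs (via
 * shost_for_each_device()), so @fn is allowed to sleep.
 */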
void starget_for_each_device(struct scsi_target *starget, void *data,
                             void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
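
/**
 * __starget_for_each_device - call a function on each device of a target
 *                             (unlocked variant)
 * @starget: target whose devices to iterate
 * @data:    opaque cookie passed to @fn
 * @fn:      callback invoked for each scsi_device on the target
 *
 * Unlike starget_for_each_device() this does not take device references,
 * so the caller must guarantee that the device list cannot change while
 * iterating.
 */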
void __starget_for_each_device(struct scsi_target *starget, void *data,
                               void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        __shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(__starget_for_each_device);
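
/**
 * __scsi_device_lookup_by_target - find a device by LUN, unlocked
 * @starget: target the device is attached to
 * @lun:     logical unit number to look for
 *
 * Returns the matching scsi_device (skipping devices in the SDEV_DEL
 * state) or NULL. No reference is taken and no locking is done here, so
 * scsi_device_lookup_by_target() is usually the better choice.
 */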
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);
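
/**
 * scsi_device_lookup_by_target - find a device by LUN
 * @starget: target the device is attached to
 * @lun:     logical unit number to look for
 *
 * Looks up the device under the host lock and returns it with a reference
 * held (drop it with scsi_device_put()), or NULL if no matching device is
 * found or a reference could not be taken.
 */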
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 u64 lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);
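
/**
 * __scsi_device_lookup - find a device by channel/id/lun, unlocked
 * @shost:   host the device is attached to
 * @channel: channel of the device
 * @id:      target id of the device
 * @lun:     logical unit number of the device
 *
 * Returns the matching scsi_device (skipping devices in the SDEV_DEL
 * state) or NULL. No reference is taken and no locking is done here, so
 * scsi_device_lookup() is usually the better choice.
 */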
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                                         uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->sdev_state == SDEV_DEL)
                        continue;
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);
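
/**
 * scsi_device_lookup - find a device by channel/id/lun
 * @shost:   host the device is attached to
 * @channel: channel of the device
 * @id:      target id of the device
 * @lun:     logical unit number of the device
 *
 * Looks up the device under the host lock and returns it with a reference
 * held (drop it with scsi_device_put()), or NULL if no matching device is
 * found or a reference could not be taken.
 */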
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                                       uint channel, uint id, u64 lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
        int error;

        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        scsi_netlink_init();

        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

        /* Unwind in the reverse order of initialization. */
cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}

static void __exit exit_scsi(void)
{
        scsi_netlink_exit();
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        scsi_exit_procfs();
        scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);