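/*
 * scsi_scan.c - SCSI bus scanning.
 *
 * A specific LUN is probed with an INQUIRY command; if a device responds,
 * a scsi_device is allocated and set up for it.
 *
 * For every id on every channel of a host, LUN 0 is scanned first.  If the
 * target responds there, the remaining LUNs are discovered either from the
 * response to a REPORT LUNS command (SCSI-3 and later, or devices flagged
 * BLIST_REPORTLUN2) or by a sequential scan up to a per-host maximum.
 * Device-specific blacklist flags, module parameters and compile-time
 * options modify this behaviour.
 */
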
0029 #include <linux/module.h>
0030 #include <linux/moduleparam.h>
0031 #include <linux/init.h>
0032 #include <linux/blkdev.h>
0033 #include <linux/delay.h>
0034 #include <linux/kthread.h>
0035 #include <linux/spinlock.h>
0036 #include <linux/async.h>
0037 #include <linux/slab.h>
0038 #include <asm/unaligned.h>
0039
0040 #include <scsi/scsi.h>
0041 #include <scsi/scsi_cmnd.h>
0042 #include <scsi/scsi_device.h>
0043 #include <scsi/scsi_driver.h>
0044 #include <scsi/scsi_devinfo.h>
0045 #include <scsi/scsi_host.h>
0046 #include <scsi/scsi_transport.h>
0047 #include <scsi/scsi_dh.h>
0048 #include <scsi/scsi_eh.h>
0049
0050 #include "scsi_priv.h"
0051 #include "scsi_logging.h"
0052
0053 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
0054 " SCSI scanning, some SCSI devices might not be configured\n"
0055
0056
0057
0058
0059 #define SCSI_TIMEOUT (2*HZ)
0060 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
0061
0062
0063
0064
0065 #define SCSI_UID_SER_NUM 'S'
0066 #define SCSI_UID_UNKNOWN 'Z'
0067
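/*
 * Return values of some of the scanning functions.
 *
 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target; this
 * includes allocation or general failures that prevent IO from being sent.
 *
 * SCSI_SCAN_TARGET_PRESENT: target responded, but no valid LUN at this
 * particular LUN.
 *
 * SCSI_SCAN_LUN_PRESENT: target responded, and a LUN was found and added.
 */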
0080 #define SCSI_SCAN_NO_RESPONSE 0
0081 #define SCSI_SCAN_TARGET_PRESENT 1
0082 #define SCSI_SCAN_LUN_PRESENT 2
0083
0084 static const char *scsi_null_device_strs = "nullnullnullnull";
0085
0086 #define MAX_SCSI_LUNS 512
0087
0088 static u64 max_scsi_luns = MAX_SCSI_LUNS;
0089
0090 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
0091 MODULE_PARM_DESC(max_luns,
0092 "last scsi LUN (should be between 1 and 2^64-1)");
0093
0094 #ifdef CONFIG_SCSI_SCAN_ASYNC
0095 #define SCSI_SCAN_TYPE_DEFAULT "async"
0096 #else
0097 #define SCSI_SCAN_TYPE_DEFAULT "sync"
0098 #endif
0099
0100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
0101
0102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
0103 S_IRUGO|S_IWUSR);
0104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
0105 "Setting to 'manual' disables automatic scanning, but allows "
0106 "for manual device scan via the 'scan' sysfs attribute.");
0107
0108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
0109
0110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
0111 MODULE_PARM_DESC(inq_timeout,
0112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
0113 " Default is 20. Some devices may need more; most need less.");
0114
0115
0116 static DEFINE_SPINLOCK(async_scan_lock);
0117 static LIST_HEAD(scanning_hosts);
0118
0119 struct async_scan_data {
0120 struct list_head list;
0121 struct Scsi_Host *shost;
0122 struct completion prev_finished;
0123 };
0124
0125
0126
0127
0128 void scsi_enable_async_suspend(struct device *dev)
0129 {
0130
0131
0132
0133
0134
0135 if (strncmp(scsi_scan_type, "async", 5) != 0)
0136 return;
0137
0138 device_enable_async_suspend(dev);
0139 }
0140
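/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 */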
0149 int scsi_complete_async_scans(void)
0150 {
0151 struct async_scan_data *data;
0152
0153 do {
0154 if (list_empty(&scanning_hosts))
0155 return 0;
0156
0157
0158
0159
0160 data = kmalloc(sizeof(*data), GFP_KERNEL);
0161 if (!data)
0162 msleep(1);
0163 } while (!data);
0164
0165 data->shost = NULL;
0166 init_completion(&data->prev_finished);
0167
0168 spin_lock(&async_scan_lock);
0169
0170 if (list_empty(&scanning_hosts))
0171 goto done;
0172 list_add_tail(&data->list, &scanning_hosts);
0173 spin_unlock(&async_scan_lock);
0174
0175 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
0176 wait_for_completion(&data->prev_finished);
0177
0178 spin_lock(&async_scan_lock);
0179 list_del(&data->list);
0180 if (!list_empty(&scanning_hosts)) {
0181 struct async_scan_data *next = list_entry(scanning_hosts.next,
0182 struct async_scan_data, list);
0183 complete(&next->prev_finished);
0184 }
0185 done:
0186 spin_unlock(&async_scan_lock);
0187
0188 kfree(data);
0189 return 0;
0190 }
0191
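/**
 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
 * @sdev:	scsi device to send command to
 * @result:	area to store the result of the MODE SENSE (at least 0x2a bytes)
 *
 * Description:
 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
 *     Called for BLIST_KEY devices.
 **/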
0201 static void scsi_unlock_floptical(struct scsi_device *sdev,
0202 unsigned char *result)
0203 {
0204 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
0205
0206 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
0207 scsi_cmd[0] = MODE_SENSE;
0208 scsi_cmd[1] = 0;
0209 scsi_cmd[2] = 0x2e;
0210 scsi_cmd[3] = 0;
0211 scsi_cmd[4] = 0x2a;
0212 scsi_cmd[5] = 0;
0213 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
0214 SCSI_TIMEOUT, 3, NULL);
0215 }
0216
0217 static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
0218 unsigned int depth)
0219 {
0220 int new_shift = sbitmap_calculate_shift(depth);
0221 bool need_alloc = !sdev->budget_map.map;
0222 bool need_free = false;
0223 int ret;
0224 struct sbitmap sb_backup;
0225
0226 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
0227
0228
0229
0230
0231
0232 if (!need_alloc && new_shift != sdev->budget_map.shift)
0233 need_alloc = need_free = true;
0234
0235 if (!need_alloc)
0236 return 0;
0237
0238
0239
0240
0241
0242 if (need_free) {
0243 blk_mq_freeze_queue(sdev->request_queue);
0244 sb_backup = sdev->budget_map;
0245 }
0246 ret = sbitmap_init_node(&sdev->budget_map,
0247 scsi_device_max_queue_depth(sdev),
0248 new_shift, GFP_KERNEL,
0249 sdev->request_queue->node, false, true);
0250 if (!ret)
0251 sbitmap_resize(&sdev->budget_map, depth);
0252
0253 if (need_free) {
0254 if (ret)
0255 sdev->budget_map = sb_backup;
0256 else
0257 sbitmap_free(&sb_backup);
0258 ret = 0;
0259 blk_mq_unfreeze_queue(sdev->request_queue);
0260 }
0261 return ret;
0262 }
0263
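/**
 * scsi_alloc_sdev - allocate and setup a scsi_device
 * @starget:	which target to allocate a &scsi_device for
 * @lun:	which lun
 * @hostdata:	usually NULL and set by ->slave_alloc instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the host, channel, id and lun in the scsi_device, sets up its
 *     request queue and budget map, and calls the host's slave_alloc hook.
 *
 * Return value:
 *     scsi_device pointer, or NULL on failure.
 **/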
0278 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
0279 u64 lun, void *hostdata)
0280 {
0281 unsigned int depth;
0282 struct scsi_device *sdev;
0283 struct request_queue *q;
0284 int display_failure_msg = 1, ret;
0285 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
0286
0287 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
0288 GFP_KERNEL);
0289 if (!sdev)
0290 goto out;
0291
0292 sdev->vendor = scsi_null_device_strs;
0293 sdev->model = scsi_null_device_strs;
0294 sdev->rev = scsi_null_device_strs;
0295 sdev->host = shost;
0296 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
0297 sdev->id = starget->id;
0298 sdev->lun = lun;
0299 sdev->channel = starget->channel;
0300 mutex_init(&sdev->state_mutex);
0301 sdev->sdev_state = SDEV_CREATED;
0302 INIT_LIST_HEAD(&sdev->siblings);
0303 INIT_LIST_HEAD(&sdev->same_target_siblings);
0304 INIT_LIST_HEAD(&sdev->starved_entry);
0305 INIT_LIST_HEAD(&sdev->event_list);
0306 spin_lock_init(&sdev->list_lock);
0307 mutex_init(&sdev->inquiry_mutex);
0308 INIT_WORK(&sdev->event_work, scsi_evt_thread);
0309 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
0310
0311 sdev->sdev_gendev.parent = get_device(&starget->dev);
0312 sdev->sdev_target = starget;
0313
0314
0315 sdev->hostdata = hostdata;
0316
0317
0318
0319 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
0320
0321
0322
0323
0324 sdev->type = -1;
0325
0326
0327
0328
0329
0330
0331 sdev->borken = 1;
0332
0333 sdev->sg_reserved_size = INT_MAX;
0334
0335 q = blk_mq_init_queue(&sdev->host->tag_set);
0336 if (IS_ERR(q)) {
0337
0338
0339 put_device(&starget->dev);
0340 kfree(sdev);
0341 goto out;
0342 }
0343 kref_get(&sdev->host->tagset_refcnt);
0344 sdev->request_queue = q;
0345 q->queuedata = sdev;
0346 __scsi_init_queue(sdev->host, q);
0347 WARN_ON_ONCE(!blk_get_queue(q));
0348
0349 depth = sdev->host->cmd_per_lun ?: 1;
0350
0351
0352
0353
0354
0355
0356
0357 if (scsi_realloc_sdev_budget_map(sdev, depth)) {
0358 put_device(&starget->dev);
0359 kfree(sdev);
0360 goto out;
0361 }
0362
0363 scsi_change_queue_depth(sdev, depth);
0364
0365 scsi_sysfs_device_initialize(sdev);
0366
0367 if (shost->hostt->slave_alloc) {
0368 ret = shost->hostt->slave_alloc(sdev);
0369 if (ret) {
0370
0371
0372
0373
0374 if (ret == -ENXIO)
0375 display_failure_msg = 0;
0376 goto out_device_destroy;
0377 }
0378 }
0379
0380 return sdev;
0381
0382 out_device_destroy:
0383 __scsi_remove_device(sdev);
0384 out:
0385 if (display_failure_msg)
0386 printk(ALLOC_FAILURE_MSG, __func__);
0387 return NULL;
0388 }
0389
0390 static void scsi_target_destroy(struct scsi_target *starget)
0391 {
0392 struct device *dev = &starget->dev;
0393 struct Scsi_Host *shost = dev_to_shost(dev->parent);
0394 unsigned long flags;
0395
0396 BUG_ON(starget->state == STARGET_DEL);
0397 starget->state = STARGET_DEL;
0398 transport_destroy_device(dev);
0399 spin_lock_irqsave(shost->host_lock, flags);
0400 if (shost->hostt->target_destroy)
0401 shost->hostt->target_destroy(starget);
0402 list_del_init(&starget->siblings);
0403 spin_unlock_irqrestore(shost->host_lock, flags);
0404 put_device(dev);
0405 }
0406
0407 static void scsi_target_dev_release(struct device *dev)
0408 {
0409 struct device *parent = dev->parent;
0410 struct scsi_target *starget = to_scsi_target(dev);
0411
0412 kfree(starget);
0413 put_device(parent);
0414 }
0415
0416 static struct device_type scsi_target_type = {
0417 .name = "scsi_target",
0418 .release = scsi_target_dev_release,
0419 };
0420
0421 int scsi_is_target_device(const struct device *dev)
0422 {
0423 return dev->type == &scsi_target_type;
0424 }
0425 EXPORT_SYMBOL(scsi_is_target_device);
0426
0427 static struct scsi_target *__scsi_find_target(struct device *parent,
0428 int channel, uint id)
0429 {
0430 struct scsi_target *starget, *found_starget = NULL;
0431 struct Scsi_Host *shost = dev_to_shost(parent);
0432
0433
0434
0435 list_for_each_entry(starget, &shost->__targets, siblings) {
0436 if (starget->id == id &&
0437 starget->channel == channel) {
0438 found_starget = starget;
0439 break;
0440 }
0441 }
0442 if (found_starget)
0443 get_device(&found_starget->dev);
0444
0445 return found_starget;
0446 }
0447
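/**
 * scsi_target_reap_ref_release - remove target from visibility
 * @kref: the reap_ref in the target being released
 *
 * Called on last put of reap_ref, which is the indication that no device
 * under this target is visible anymore, so render the target invisible in
 * sysfs.  Note: we have to be in user context here because the target reaps
 * should be done in places where the scsi device visibility is being removed.
 */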
0457 static void scsi_target_reap_ref_release(struct kref *kref)
0458 {
0459 struct scsi_target *starget
0460 = container_of(kref, struct scsi_target, reap_ref);
0461
0462
0463
0464
0465
0466
0467 if ((starget->state != STARGET_CREATED) &&
0468 (starget->state != STARGET_CREATED_REMOVE)) {
0469 transport_remove_device(&starget->dev);
0470 device_del(&starget->dev);
0471 }
0472 scsi_target_destroy(starget);
0473 }
0474
0475 static void scsi_target_reap_ref_put(struct scsi_target *starget)
0476 {
0477 kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
0478 }
0479
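/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (need not be a scsi host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one exists, provided it hasn't already
 * gone into STARGET_DEL state, otherwise allocate a new target.
 *
 * The target is returned with an incremented reference, so the caller
 * is responsible for both reaping and doing a last put.
 */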
0492 static struct scsi_target *scsi_alloc_target(struct device *parent,
0493 int channel, uint id)
0494 {
0495 struct Scsi_Host *shost = dev_to_shost(parent);
0496 struct device *dev = NULL;
0497 unsigned long flags;
0498 const int size = sizeof(struct scsi_target)
0499 + shost->transportt->target_size;
0500 struct scsi_target *starget;
0501 struct scsi_target *found_target;
0502 int error, ref_got;
0503
0504 starget = kzalloc(size, GFP_KERNEL);
0505 if (!starget) {
0506 printk(KERN_ERR "%s: allocation failure\n", __func__);
0507 return NULL;
0508 }
0509 dev = &starget->dev;
0510 device_initialize(dev);
0511 kref_init(&starget->reap_ref);
0512 dev->parent = get_device(parent);
0513 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
0514 dev->bus = &scsi_bus_type;
0515 dev->type = &scsi_target_type;
0516 scsi_enable_async_suspend(dev);
0517 starget->id = id;
0518 starget->channel = channel;
0519 starget->can_queue = 0;
0520 INIT_LIST_HEAD(&starget->siblings);
0521 INIT_LIST_HEAD(&starget->devices);
0522 starget->state = STARGET_CREATED;
0523 starget->scsi_level = SCSI_2;
0524 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
0525 retry:
0526 spin_lock_irqsave(shost->host_lock, flags);
0527
0528 found_target = __scsi_find_target(parent, channel, id);
0529 if (found_target)
0530 goto found;
0531
0532 list_add_tail(&starget->siblings, &shost->__targets);
0533 spin_unlock_irqrestore(shost->host_lock, flags);
0534
0535 transport_setup_device(dev);
0536 if (shost->hostt->target_alloc) {
0537 error = shost->hostt->target_alloc(starget);
0538
		if (error) {
0540 if (error != -ENXIO)
0541 dev_err(dev, "target allocation failed, error %d\n", error);
0542
0543
0544 scsi_target_destroy(starget);
0545 return NULL;
0546 }
0547 }
0548 get_device(dev);
0549
0550 return starget;
0551
0552 found:
0553
0554
0555
0556
0557
0558 ref_got = kref_get_unless_zero(&found_target->reap_ref);
0559
0560 spin_unlock_irqrestore(shost->host_lock, flags);
0561 if (ref_got) {
0562 put_device(dev);
0563 return found_target;
0564 }
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574 put_device(&found_target->dev);
0575
0576
0577
0578
0579 msleep(1);
0580 goto retry;
0581 }
0582
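/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target;
 * it checks atomically that nothing is using the target and removes
 * it if so.
 */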
0591 void scsi_target_reap(struct scsi_target *starget)
0592 {
0593
0594
0595
0596
0597
0598 BUG_ON(starget->state == STARGET_DEL);
0599 scsi_target_reap_ref_put(starget);
0600 }
0601
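/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Exception: a NUL character is interpreted as a
 *	string terminator, so all the following characters are set to
 *	spaces.
 **/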
0617 void scsi_sanitize_inquiry_string(unsigned char *s, int len)
0618 {
0619 int terminated = 0;
0620
0621 for (; len > 0; (--len, ++s)) {
0622 if (*s == 0)
0623 terminated = 1;
0624 if (terminated || *s < 0x20 || *s > 0x7e)
0625 *s = ' ';
0626 }
0627 }
0628 EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
0629
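/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	length of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY.
 *
 *     If the INQUIRY is successful, zero is returned and the INQUIRY data
 *     is in @inq_result; the scsi_level and INQUIRY length are copied to
 *     the scsi_device and any blacklist flags are stored in *@bflags.
 **/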
0644 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
0645 int result_len, blist_flags_t *bflags)
0646 {
0647 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
0648 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
0649 int response_len = 0;
0650 int pass, count, result;
0651 struct scsi_sense_hdr sshdr;
0652
0653 *bflags = 0;
0654
0655
0656
0657
0658 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
0659 try_inquiry_len = first_inquiry_len;
0660 pass = 1;
0661
0662 next_pass:
0663 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
0664 "scsi scan: INQUIRY pass %d length %d\n",
0665 pass, try_inquiry_len));
0666
0667
0668 for (count = 0; count < 3; ++count) {
0669 int resid;
0670
0671 memset(scsi_cmd, 0, 6);
0672 scsi_cmd[0] = INQUIRY;
0673 scsi_cmd[4] = (unsigned char) try_inquiry_len;
0674
0675 memset(inq_result, 0, try_inquiry_len);
0676
0677 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
0678 inq_result, try_inquiry_len, &sshdr,
0679 HZ / 2 + HZ * scsi_inq_timeout, 3,
0680 &resid);
0681
0682 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
0683 "scsi scan: INQUIRY %s with code 0x%x\n",
0684 result ? "failed" : "successful", result));
0685
0686 if (result > 0) {
0687
0688
0689
0690
0691
0692
0693 if (scsi_status_is_check_condition(result) &&
0694 scsi_sense_valid(&sshdr)) {
0695 if ((sshdr.sense_key == UNIT_ATTENTION) &&
0696 ((sshdr.asc == 0x28) ||
0697 (sshdr.asc == 0x29)) &&
0698 (sshdr.ascq == 0))
0699 continue;
0700 }
0701 } else if (result == 0) {
0702
0703
0704
0705
0706
0707 if (resid == try_inquiry_len)
0708 continue;
0709 }
0710 break;
0711 }
0712
0713 if (result == 0) {
0714 scsi_sanitize_inquiry_string(&inq_result[8], 8);
0715 scsi_sanitize_inquiry_string(&inq_result[16], 16);
0716 scsi_sanitize_inquiry_string(&inq_result[32], 4);
0717
0718 response_len = inq_result[4] + 5;
0719 if (response_len > 255)
0720 response_len = first_inquiry_len;
0721
0722
0723
0724
0725
0726
0727
0728
0729 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
0730 &inq_result[16]);
0731
0732
0733
0734 if (pass == 1) {
0735 if (BLIST_INQUIRY_36 & *bflags)
0736 next_inquiry_len = 36;
0737
0738
0739
0740
0741
0742
0743
0744
0745 else if (sdev->inquiry_len &&
0746 response_len > sdev->inquiry_len &&
0747 (inq_result[2] & 0x7) < 6)
0748 next_inquiry_len = sdev->inquiry_len;
0749 else
0750 next_inquiry_len = response_len;
0751
0752
0753 if (next_inquiry_len > try_inquiry_len) {
0754 try_inquiry_len = next_inquiry_len;
0755 pass = 2;
0756 goto next_pass;
0757 }
0758 }
0759
0760 } else if (pass == 2) {
0761 sdev_printk(KERN_INFO, sdev,
0762 "scsi scan: %d byte inquiry failed. "
0763 "Consider BLIST_INQUIRY_36 for this device\n",
0764 try_inquiry_len);
0765
0766
0767
0768 try_inquiry_len = first_inquiry_len;
0769 pass = 3;
0770 goto next_pass;
0771 }
0772
0773
0774
0775 if (result)
0776 return -EIO;
0777
0778
0779 sdev->inquiry_len = min(try_inquiry_len, response_len);
0780
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794
0795
0796 if (sdev->inquiry_len < 36) {
0797 if (!sdev->host->short_inquiry) {
0798 shost_printk(KERN_INFO, sdev->host,
0799 "scsi scan: INQUIRY result too short (%d),"
0800 " using 36\n", sdev->inquiry_len);
0801 sdev->host->short_inquiry = 1;
0802 }
0803 sdev->inquiry_len = 36;
0804 }
0805
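	/*
	 * Record the SCSI level from the INQUIRY data.  ANSI versions of 2
	 * and above (and SCSI-1 devices using the CCS response format) are
	 * stored incremented by one so that they match the SCSI_2, SCSI_3,
	 * ... constants, leaving 0 for SCSI_UNKNOWN.
	 */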
0824 sdev->scsi_level = inq_result[2] & 0x07;
0825 if (sdev->scsi_level >= 2 ||
0826 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
0827 sdev->scsi_level++;
0828 sdev->sdev_target->scsi_level = sdev->scsi_level;
0829
0830
0831
0832
0833
0834 sdev->lun_in_cdb = 0;
0835 if (sdev->scsi_level <= SCSI_2 &&
0836 sdev->scsi_level != SCSI_UNKNOWN &&
0837 !sdev->host->no_scsi2_lun_in_cdb)
0838 sdev->lun_in_cdb = 1;
0839
0840 return 0;
0841 }
0842
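/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev.  Optionally set fields based
 *     on values in *@bflags.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/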
0858 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
0859 blist_flags_t *bflags, int async)
0860 {
0861 int ret;
0862
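	/*
	 * Keep a copy of at least 36 bytes of the INQUIRY data, so that the
	 * vendor, model and revision strings referenced below remain valid
	 * even if the device returned a shorter response.
	 */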
0883 sdev->inquiry = kmemdup(inq_result,
0884 max_t(size_t, sdev->inquiry_len, 36),
0885 GFP_KERNEL);
0886 if (sdev->inquiry == NULL)
0887 return SCSI_SCAN_NO_RESPONSE;
0888
0889 sdev->vendor = (char *) (sdev->inquiry + 8);
0890 sdev->model = (char *) (sdev->inquiry + 16);
0891 sdev->rev = (char *) (sdev->inquiry + 32);
0892
	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
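		/*
		 * SATA device behind a SCSI-ATA translation layer.  Such
		 * devices may report NOT READY after the SATL has put them
		 * into standby, so allow the sd driver to restart them.
		 */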
0900 sdev->allow_restart = 1;
0901 }
0902
0903 if (*bflags & BLIST_ISROM) {
0904 sdev->type = TYPE_ROM;
0905 sdev->removable = 1;
0906 } else {
0907 sdev->type = (inq_result[0] & 0x1f);
0908 sdev->removable = (inq_result[1] & 0x80) >> 7;
0909
0910
0911
0912
0913
0914
0915 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
0916 sdev_printk(KERN_WARNING, sdev,
0917 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
0918 __func__, sdev->type, (unsigned int)sdev->lun);
0919 sdev->type = TYPE_WLUN;
0920 }
0921
0922 }
0923
0924 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
0925
0926
0927
0928
0929 if ((*bflags & BLIST_REPORTLUN2) == 0)
0930 *bflags |= BLIST_NOREPORTLUN;
0931 }
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
0950 sdev->lockable = sdev->removable;
0951 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
0952
0953 if (sdev->scsi_level >= SCSI_3 ||
0954 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
0955 sdev->ppr = 1;
0956 if (inq_result[7] & 0x60)
0957 sdev->wdtr = 1;
0958 if (inq_result[7] & 0x10)
0959 sdev->sdtr = 1;
0960
0961 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
0962 "ANSI: %d%s\n", scsi_device_type(sdev->type),
0963 sdev->vendor, sdev->model, sdev->rev,
0964 sdev->inq_periph_qual, inq_result[2] & 0x07,
0965 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
0966
0967 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
0968 !(*bflags & BLIST_NOTQ)) {
0969 sdev->tagged_supported = 1;
0970 sdev->simple_tags = 1;
0971 }
0972
0973
0974
0975
0976
0977
0978 if ((*bflags & BLIST_BORKEN) == 0)
0979 sdev->borken = 0;
0980
0981 if (*bflags & BLIST_NO_ULD_ATTACH)
0982 sdev->no_uld_attach = 1;
0983
0984
0985
0986
0987
0988 if (*bflags & BLIST_SELECT_NO_ATN)
0989 sdev->select_no_atn = 1;
0990
0991
0992
0993
0994
0995 if (*bflags & BLIST_MAX_512)
0996 blk_queue_max_hw_sectors(sdev->request_queue, 512);
0997
0998
0999
1000
1001 else if (*bflags & BLIST_MAX_1024)
1002 blk_queue_max_hw_sectors(sdev->request_queue, 1024);
1003
1004
1005
1006
1007
1008 if (*bflags & BLIST_NOSTARTONADD)
1009 sdev->no_start_on_add = 1;
1010
1011 if (*bflags & BLIST_SINGLELUN)
1012 scsi_target(sdev)->single_lun = 1;
1013
1014 sdev->use_10_for_rw = 1;
1015
1016
1017
1018
1019 if (*bflags & BLIST_NO_RSOC)
1020 sdev->no_report_opcodes = 1;
1021
1022
1023
1024 mutex_lock(&sdev->state_mutex);
1025 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1026 if (ret)
1027 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1028 mutex_unlock(&sdev->state_mutex);
1029
1030 if (ret) {
1031 sdev_printk(KERN_ERR, sdev,
1032 "in wrong state %s to complete scan\n",
1033 scsi_device_state_name(sdev->sdev_state));
1034 return SCSI_SCAN_NO_RESPONSE;
1035 }
1036
1037 if (*bflags & BLIST_NOT_LOCKABLE)
1038 sdev->lockable = 0;
1039
1040 if (*bflags & BLIST_RETRY_HWERROR)
1041 sdev->retry_hwerror = 1;
1042
1043 if (*bflags & BLIST_NO_DIF)
1044 sdev->no_dif = 1;
1045
1046 if (*bflags & BLIST_UNMAP_LIMIT_WS)
1047 sdev->unmap_limit_for_ws = 1;
1048
1049 if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1050 sdev->ignore_media_change = 1;
1051
1052 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1053
1054 if (*bflags & BLIST_TRY_VPD_PAGES)
1055 sdev->try_vpd_pages = 1;
1056 else if (*bflags & BLIST_SKIP_VPD_PAGES)
1057 sdev->skip_vpd_pages = 1;
1058
1059 transport_configure_device(&sdev->sdev_gendev);
1060
1061 if (sdev->host->hostt->slave_configure) {
1062 ret = sdev->host->hostt->slave_configure(sdev);
1063 if (ret) {
1064
1065
1066
1067
1068 if (ret != -ENXIO) {
1069 sdev_printk(KERN_ERR, sdev,
1070 "failed to configure device\n");
1071 }
1072 return SCSI_SCAN_NO_RESPONSE;
1073 }
1074
1075
1076
1077
1078
1079
1080 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1081 }
1082
1083 if (sdev->scsi_level >= SCSI_3)
1084 scsi_attach_vpd(sdev);
1085
1086 sdev->max_queue_depth = sdev->queue_depth;
1087 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1088 sdev->sdev_bflags = *bflags;
1089
1090
1091
1092
1093
1094
1095 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1096 return SCSI_SCAN_NO_RESPONSE;
1097
1098 return SCSI_SCAN_LUN_PRESENT;
1099 }
1100
1101 #ifdef CONFIG_SCSI_LOGGING
1102
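/**
 * scsi_inq_str - copy an INQUIRY string, stripping trailing whitespace
 * @buf:   output buffer with at least end-first+1 bytes of space
 * @inq:   INQUIRY buffer (input)
 * @first: offset of the string into inq
 * @end:   index after the last character in inq
 */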
1109 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1110 unsigned first, unsigned end)
1111 {
1112 unsigned term = 0, idx;
1113
1114 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1115 if (inq[idx+first] > ' ') {
1116 buf[idx] = inq[idx+first];
1117 term = idx+1;
1118 } else {
1119 buf[idx] = ' ';
1120 }
1121 }
1122 buf[term] = 0;
1123 return buf;
1124 }
1125 #endif
1126
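/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL skip some code only
 *		needed on first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *     attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/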
1148 static int scsi_probe_and_add_lun(struct scsi_target *starget,
1149 u64 lun, blist_flags_t *bflagsp,
1150 struct scsi_device **sdevp,
1151 enum scsi_scan_mode rescan,
1152 void *hostdata)
1153 {
1154 struct scsi_device *sdev;
1155 unsigned char *result;
1156 blist_flags_t bflags;
1157 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1158 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1159
1160
1161
1162
1163
1164 sdev = scsi_device_lookup_by_target(starget, lun);
1165 if (sdev) {
1166 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1167 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1168 "scsi scan: device exists on %s\n",
1169 dev_name(&sdev->sdev_gendev)));
1170 if (sdevp)
1171 *sdevp = sdev;
1172 else
1173 scsi_device_put(sdev);
1174
1175 if (bflagsp)
1176 *bflagsp = scsi_get_device_flags(sdev,
1177 sdev->vendor,
1178 sdev->model);
1179 return SCSI_SCAN_LUN_PRESENT;
1180 }
1181 scsi_device_put(sdev);
1182 } else
1183 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1184 if (!sdev)
1185 goto out;
1186
1187 result = kmalloc(result_len, GFP_KERNEL);
1188 if (!result)
1189 goto out_free_sdev;
1190
1191 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1192 goto out_free_result;
1193
1194 if (bflagsp)
1195 *bflagsp = bflags;
1196
1197
1198
1199 if ((result[0] >> 5) == 3) {
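		/*
		 * For a Peripheral qualifier of 3 (011b) the SCSI spec says
		 * the device server is not capable of supporting a physical
		 * device on this logical unit.  For disks this means there
		 * is no logical disk configured at sdev->lun, but a target
		 * id is responding.
		 */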
1210 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1211 " peripheral qualifier of 3, device not"
				   " added\n"));
1213 if (lun == 0) {
1214 SCSI_LOG_SCAN_BUS(1, {
1215 unsigned char vend[9];
1216 unsigned char mod[17];
1217
1218 sdev_printk(KERN_INFO, sdev,
1219 "scsi scan: consider passing scsi_mod."
1220 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1221 scsi_inq_str(vend, result, 8, 16),
1222 scsi_inq_str(mod, result, 16, 32));
1223 });
1224
1225 }
1226
1227 res = SCSI_SCAN_TARGET_PRESENT;
1228 goto out_free_result;
1229 }
1230
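	/*
	 * Some non-standard targets signal "no LUN here" with a Peripheral
	 * qualifier of 1, or with a peripheral device type of 0x1f (unknown
	 * or no device type) when the target has pdt_1f_for_no_lun set.
	 * Don't add a device in those cases, except for well-known LUNs
	 * (such as the REPORT LUNS W-LUN), which must remain visible so the
	 * scan can continue.
	 */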
1251 if (((result[0] >> 5) == 1 ||
1252 (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
1253 !scsi_is_wlun(lun)) {
1254 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1255 "scsi scan: peripheral device type"
1256 " of 31, no device added\n"));
1257 res = SCSI_SCAN_TARGET_PRESENT;
1258 goto out_free_result;
1259 }
1260
1261 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1262 if (res == SCSI_SCAN_LUN_PRESENT) {
1263 if (bflags & BLIST_KEY) {
1264 sdev->lockable = 0;
1265 scsi_unlock_floptical(sdev, result);
1266 }
1267 }
1268
1269 out_free_result:
1270 kfree(result);
1271 out_free_sdev:
1272 if (res == SCSI_SCAN_LUN_PRESENT) {
1273 if (sdevp) {
1274 if (scsi_device_get(sdev) == 0) {
1275 *sdevp = sdev;
1276 } else {
1277 __scsi_remove_device(sdev);
1278 res = SCSI_SCAN_NO_RESPONSE;
1279 }
1280 }
1281 } else
1282 __scsi_remove_device(sdev);
1283 out:
1284 return res;
1285 }
1286
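/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level:	which version of the standard this target adheres to
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
 *     scanned) up to some maximum number of LUNs for this target.
 **/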
1301 static void scsi_sequential_lun_scan(struct scsi_target *starget,
1302 blist_flags_t bflags, int scsi_level,
1303 enum scsi_scan_mode rescan)
1304 {
1305 uint max_dev_lun;
1306 u64 sparse_lun, lun;
1307 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1308
1309 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1310 "scsi scan: Sequential scan\n"));
1311
1312 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1313
1314
1315
1316
1317
1318 if (bflags & BLIST_SPARSELUN) {
1319 max_dev_lun = shost->max_lun;
1320 sparse_lun = 1;
1321 } else
1322 sparse_lun = 0;
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345 if (bflags & BLIST_FORCELUN)
1346 max_dev_lun = shost->max_lun;
1347
1348
1349
1350 if (bflags & BLIST_MAX5LUN)
1351 max_dev_lun = min(5U, max_dev_lun);
1352
1353
1354
1355
1356 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1357 max_dev_lun = min(8U, max_dev_lun);
1358 else
1359 max_dev_lun = min(256U, max_dev_lun);
1360
1361
1362
1363
1364
1365
1366 for (lun = 1; lun < max_dev_lun; ++lun)
1367 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1368 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1369 !sparse_lun)
1370 return;
1371 }
1372
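/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget:	which target
 * @bflags:	zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or
 *		BLIST_NOREPORTLUN
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN
 *   command and scanning the resulting list of LUNs with
 *   scsi_probe_and_add_lun().
 *
 *   If BLIST_NOREPORTLUN is set, return 1 always.
 *   If BLIST_NOLUN is set, return 0 always.
 *   If starget->no_report_luns is set, return 1 always.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN
 **/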
1393 static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1394 enum scsi_scan_mode rescan)
1395 {
1396 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1397 unsigned int length;
1398 u64 lun;
1399 unsigned int num_luns;
1400 unsigned int retries;
1401 int result;
1402 struct scsi_lun *lunp, *lun_data;
1403 struct scsi_sense_hdr sshdr;
1404 struct scsi_device *sdev;
1405 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1406 int ret = 0;
1407
1408
1409
1410
1411
1412
1413
1414 if (bflags & BLIST_NOREPORTLUN)
1415 return 1;
1416 if (starget->scsi_level < SCSI_2 &&
1417 starget->scsi_level != SCSI_UNKNOWN)
1418 return 1;
1419 if (starget->scsi_level < SCSI_3 &&
1420 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1421 return 1;
1422 if (bflags & BLIST_NOLUN)
1423 return 0;
1424 if (starget->no_report_luns)
1425 return 1;
1426
1427 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1428 sdev = scsi_alloc_sdev(starget, 0, NULL);
1429 if (!sdev)
1430 return 0;
1431 if (scsi_device_get(sdev)) {
1432 __scsi_remove_device(sdev);
1433 return 0;
1434 }
1435 }
1436
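	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus up to 511 LUN entries; the buffer is grown and the command
	 * retried below if the device reports more LUNs than that.
	 */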
1442 length = (511 + 1) * sizeof(struct scsi_lun);
1443 retry:
1444 lun_data = kmalloc(length, GFP_KERNEL);
1445 if (!lun_data) {
1446 printk(ALLOC_FAILURE_MSG, __func__);
1447 goto out;
1448 }
1449
1450 scsi_cmd[0] = REPORT_LUNS;
1451
1452
1453
1454
1455 memset(&scsi_cmd[1], 0, 5);
1456
1457
1458
1459
1460 put_unaligned_be32(length, &scsi_cmd[6]);
1461
1462 scsi_cmd[10] = 0;
1463 scsi_cmd[11] = 0;
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475 for (retries = 0; retries < 3; retries++) {
1476 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1477 "scsi scan: Sending REPORT LUNS to (try %d)\n",
1478 retries));
1479
1480 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1481 lun_data, length, &sshdr,
1482 SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
1483
1484 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1485 "scsi scan: REPORT LUNS"
1486 " %s (try %d) result 0x%x\n",
1487 result ? "failed" : "successful",
1488 retries, result));
1489 if (result == 0)
1490 break;
1491 else if (scsi_sense_valid(&sshdr)) {
1492 if (sshdr.sense_key != UNIT_ATTENTION)
1493 break;
1494 }
1495 }
1496
1497 if (result) {
1498
1499
1500
1501 ret = 1;
1502 goto out_err;
1503 }
1504
1505
1506
1507
1508 if (get_unaligned_be32(lun_data->scsi_lun) +
1509 sizeof(struct scsi_lun) > length) {
1510 length = get_unaligned_be32(lun_data->scsi_lun) +
1511 sizeof(struct scsi_lun);
1512 kfree(lun_data);
1513 goto retry;
1514 }
1515 length = get_unaligned_be32(lun_data->scsi_lun);
1516
1517 num_luns = (length / sizeof(struct scsi_lun));
1518
1519 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1520 "scsi scan: REPORT LUN scan\n"));
1521
1522
1523
1524
1525
1526 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1527 lun = scsilun_to_int(lunp);
1528
1529 if (lun > sdev->host->max_lun) {
1530 sdev_printk(KERN_WARNING, sdev,
1531 "lun%llu has a LUN larger than"
1532 " allowed by the host adapter\n", lun);
1533 } else {
1534 int res;
1535
1536 res = scsi_probe_and_add_lun(starget,
1537 lun, NULL, NULL, rescan, NULL);
1538 if (res == SCSI_SCAN_NO_RESPONSE) {
1539
1540
1541
1542 sdev_printk(KERN_ERR, sdev,
1543 "Unexpected response"
1544 " from lun %llu while scanning, scan"
1545 " aborted\n", (unsigned long long)lun);
1546 break;
1547 }
1548 }
1549 }
1550
1551 out_err:
1552 kfree(lun_data);
1553 out:
1554 if (scsi_device_created(sdev))
1555
1556
1557
1558 __scsi_remove_device(sdev);
1559 scsi_device_put(sdev);
1560 return ret;
1561 }
1562
1563 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1564 uint id, u64 lun, void *hostdata)
1565 {
1566 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1567 struct device *parent = &shost->shost_gendev;
1568 struct scsi_target *starget;
1569
1570 if (strncmp(scsi_scan_type, "none", 4) == 0)
1571 return ERR_PTR(-ENODEV);
1572
1573 starget = scsi_alloc_target(parent, channel, id);
1574 if (!starget)
1575 return ERR_PTR(-ENOMEM);
1576 scsi_autopm_get_target(starget);
1577
1578 mutex_lock(&shost->scan_mutex);
1579 if (!shost->async_scan)
1580 scsi_complete_async_scans();
1581
1582 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
1584 scsi_autopm_put_host(shost);
1585 }
1586 mutex_unlock(&shost->scan_mutex);
1587 scsi_autopm_put_target(starget);
1588
1589
1590
1591
1592 scsi_target_reap(starget);
1593 put_device(&starget->dev);
1594
1595 return sdev;
1596 }
1597 EXPORT_SYMBOL(__scsi_add_device);
1598
1599 int scsi_add_device(struct Scsi_Host *host, uint channel,
1600 uint target, u64 lun)
1601 {
1602 struct scsi_device *sdev =
1603 __scsi_add_device(host, channel, target, lun, NULL);
1604 if (IS_ERR(sdev))
1605 return PTR_ERR(sdev);
1606
1607 scsi_device_put(sdev);
1608 return 0;
1609 }
1610 EXPORT_SYMBOL(scsi_add_device);
1611
1612 void scsi_rescan_device(struct device *dev)
1613 {
1614 struct scsi_device *sdev = to_scsi_device(dev);
1615
1616 device_lock(dev);
1617
1618 scsi_attach_vpd(sdev);
1619
1620 if (sdev->handler && sdev->handler->rescan)
1621 sdev->handler->rescan(sdev);
1622
1623 if (dev->driver && try_module_get(dev->driver->owner)) {
1624 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1625
1626 if (drv->rescan)
1627 drv->rescan(dev);
1628 module_put(dev->driver->owner);
1629 }
1630 device_unlock(dev);
1631 }
1632 EXPORT_SYMBOL(scsi_rescan_device);
1633
1634 static void __scsi_scan_target(struct device *parent, unsigned int channel,
1635 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1636 {
1637 struct Scsi_Host *shost = dev_to_shost(parent);
1638 blist_flags_t bflags = 0;
1639 int res;
1640 struct scsi_target *starget;
1641
1642 if (shost->this_id == id)
1643
1644
1645
1646 return;
1647
1648 starget = scsi_alloc_target(parent, channel, id);
1649 if (!starget)
1650 return;
1651 scsi_autopm_get_target(starget);
1652
1653 if (lun != SCAN_WILD_CARD) {
1654
1655
1656
1657 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1658 goto out_reap;
1659 }
1660
1661
1662
1663
1664
1665 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1666 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1667 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1668
1669
1670
1671
1672 scsi_sequential_lun_scan(starget, bflags,
1673 starget->scsi_level, rescan);
1674 }
1675
1676 out_reap:
1677 scsi_autopm_put_target(starget);
1678
1679
1680
1681
1682 scsi_target_reap(starget);
1683
1684 put_device(&starget->dev);
1685 }
1686
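/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to the LUN scanning routines; SCSI_SCAN_INITIAL for
 *		no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, and
 *		SCSI_SCAN_MANUAL to force scanning even if 'scan=manual'
 *		is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id.  Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan; if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 **/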
1705 void scsi_scan_target(struct device *parent, unsigned int channel,
1706 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1707 {
1708 struct Scsi_Host *shost = dev_to_shost(parent);
1709
1710 if (strncmp(scsi_scan_type, "none", 4) == 0)
1711 return;
1712
1713 if (rescan != SCSI_SCAN_MANUAL &&
1714 strncmp(scsi_scan_type, "manual", 6) == 0)
1715 return;
1716
1717 mutex_lock(&shost->scan_mutex);
1718 if (!shost->async_scan)
1719 scsi_complete_async_scans();
1720
1721 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1722 __scsi_scan_target(parent, channel, id, lun, rescan);
1723 scsi_autopm_put_host(shost);
1724 }
1725 mutex_unlock(&shost->scan_mutex);
1726 }
1727 EXPORT_SYMBOL(scsi_scan_target);
1728
1729 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1730 unsigned int id, u64 lun,
1731 enum scsi_scan_mode rescan)
1732 {
1733 uint order_id;
1734
1735 if (id == SCAN_WILD_CARD)
1736 for (id = 0; id < shost->max_id; ++id) {
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746 if (shost->reverse_ordering)
1747
1748
1749
1750 order_id = shost->max_id - id - 1;
1751 else
1752 order_id = id;
1753 __scsi_scan_target(&shost->shost_gendev, channel,
1754 order_id, lun, rescan);
1755 }
1756 else
1757 __scsi_scan_target(&shost->shost_gendev, channel,
1758 id, lun, rescan);
1759 }
1760
1761 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1762 unsigned int id, u64 lun,
1763 enum scsi_scan_mode rescan)
1764 {
1765 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1766 "%s: <%u:%u:%llu>\n",
1767 __func__, channel, id, lun));
1768
1769 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1770 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1771 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1772 return -EINVAL;
1773
1774 mutex_lock(&shost->scan_mutex);
1775 if (!shost->async_scan)
1776 scsi_complete_async_scans();
1777
1778 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1779 if (channel == SCAN_WILD_CARD)
1780 for (channel = 0; channel <= shost->max_channel;
1781 channel++)
1782 scsi_scan_channel(shost, channel, id, lun,
1783 rescan);
1784 else
1785 scsi_scan_channel(shost, channel, id, lun, rescan);
1786 scsi_autopm_put_host(shost);
1787 }
1788 mutex_unlock(&shost->scan_mutex);
1789
1790 return 0;
1791 }
1792
1793 static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1794 {
1795 struct scsi_device *sdev;
1796 shost_for_each_device(sdev, shost) {
1797
1798 if (sdev->sdev_state == SDEV_DEL)
1799 continue;
1800
1801 if (sdev->is_visible)
1802 continue;
1803 if (!scsi_host_scan_allowed(shost) ||
1804 scsi_sysfs_add_sdev(sdev) != 0)
1805 __scsi_remove_device(sdev);
1806 }
1807 }
1808
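/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan()
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */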
1819 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1820 {
1821 struct async_scan_data *data = NULL;
1822 unsigned long flags;
1823
1824 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1825 return NULL;
1826
1827 mutex_lock(&shost->scan_mutex);
1828 if (shost->async_scan) {
1829 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1830 goto err;
1831 }
1832
1833 data = kmalloc(sizeof(*data), GFP_KERNEL);
1834 if (!data)
1835 goto err;
1836 data->shost = scsi_host_get(shost);
1837 if (!data->shost)
1838 goto err;
1839 init_completion(&data->prev_finished);
1840
1841 spin_lock_irqsave(shost->host_lock, flags);
1842 shost->async_scan = 1;
1843 spin_unlock_irqrestore(shost->host_lock, flags);
1844 mutex_unlock(&shost->scan_mutex);
1845
1846 spin_lock(&async_scan_lock);
1847 if (list_empty(&scanning_hosts))
1848 complete(&data->prev_finished);
1849 list_add_tail(&data->list, &scanning_hosts);
1850 spin_unlock(&async_scan_lock);
1851
1852 return data;
1853
1854 err:
1855 mutex_unlock(&shost->scan_mutex);
1856 kfree(data);
1857 return NULL;
1858 }
1859
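/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */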
1868 static void scsi_finish_async_scan(struct async_scan_data *data)
1869 {
1870 struct Scsi_Host *shost;
1871 unsigned long flags;
1872
1873 if (!data)
1874 return;
1875
1876 shost = data->shost;
1877
1878 mutex_lock(&shost->scan_mutex);
1879
1880 if (!shost->async_scan) {
1881 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1882 dump_stack();
1883 mutex_unlock(&shost->scan_mutex);
1884 return;
1885 }
1886
1887 wait_for_completion(&data->prev_finished);
1888
1889 scsi_sysfs_add_devices(shost);
1890
1891 spin_lock_irqsave(shost->host_lock, flags);
1892 shost->async_scan = 0;
1893 spin_unlock_irqrestore(shost->host_lock, flags);
1894
1895 mutex_unlock(&shost->scan_mutex);
1896
1897 spin_lock(&async_scan_lock);
1898 list_del(&data->list);
1899 if (!list_empty(&scanning_hosts)) {
1900 struct async_scan_data *next = list_entry(scanning_hosts.next,
1901 struct async_scan_data, list);
1902 complete(&next->prev_finished);
1903 }
1904 spin_unlock(&async_scan_lock);
1905
1906 scsi_autopm_put_host(shost);
1907 scsi_host_put(shost);
1908 kfree(data);
1909 }
1910
1911 static void do_scsi_scan_host(struct Scsi_Host *shost)
1912 {
1913 if (shost->hostt->scan_finished) {
1914 unsigned long start = jiffies;
1915 if (shost->hostt->scan_start)
1916 shost->hostt->scan_start(shost);
1917
1918 while (!shost->hostt->scan_finished(shost, jiffies - start))
1919 msleep(10);
1920 } else {
1921 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
					SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
1923 }
1924 }
1925
1926 static void do_scan_async(void *_data, async_cookie_t c)
1927 {
1928 struct async_scan_data *data = _data;
1929 struct Scsi_Host *shost = data->shost;
1930
1931 do_scsi_scan_host(shost);
1932 scsi_finish_async_scan(data);
1933 }
1934
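/**
 * scsi_scan_host - scan the given adapter
 * @shost:	adapter to scan
 **/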
1939 void scsi_scan_host(struct Scsi_Host *shost)
1940 {
1941 struct async_scan_data *data;
1942
1943 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
1944 strncmp(scsi_scan_type, "manual", 6) == 0)
1945 return;
1946 if (scsi_autopm_get_host(shost) < 0)
1947 return;
1948
1949 data = scsi_prep_async_scan(shost);
1950 if (!data) {
1951 do_scsi_scan_host(shost);
1952 scsi_autopm_put_host(shost);
1953 return;
1954 }
1955
1956
1957
1958
1959 async_schedule(do_scan_async, data);
1960
1961
1962 }
1963 EXPORT_SYMBOL(scsi_scan_host);
1964
1965 void scsi_forget_host(struct Scsi_Host *shost)
1966 {
1967 struct scsi_device *sdev;
1968 unsigned long flags;
1969
1970 restart:
1971 spin_lock_irqsave(shost->host_lock, flags);
1972 list_for_each_entry(sdev, &shost->__devices, siblings) {
1973 if (sdev->sdev_state == SDEV_DEL)
1974 continue;
1975 spin_unlock_irqrestore(shost->host_lock, flags);
1976 __scsi_remove_device(sdev);
1977 goto restart;
1978 }
1979 spin_unlock_irqrestore(shost->host_lock, flags);
1980 }
1981