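// SPDX-License-Identifier: GPL-2.0-or-later
/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Author: Dave Boutcher
 *
 * Emulation of a SCSI host adapter for Virtual I/O devices.
 *
 * This driver allows the Linux SCSI peripheral drivers to access
 * devices provided by the hosting partition.  SCSI requests are
 * translated into SRP information units and exchanged with the server
 * over a Command/Response Queue (CRQ) pair registered with the
 * hypervisor.
 * ------------------------------------------------------------
 */
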
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/kthread.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_srp.h>
#include "ibmvscsi.h"

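/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 buses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */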
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
static DEFINE_SPINLOCK(ibmvscsi_driver_lock);

static struct scsi_transport_template *ibmvscsi_transport_template;

#define IBMVSCSI_VERSION "1.5.9"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO);
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");

static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
				struct ibmvscsi_host_data *hostdata);

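/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * ibmvscsi_handle_event: - Interrupt handler for crq events
 * @irq:	number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */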
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
{
	struct ibmvscsi_host_data *hostdata =
			(struct ibmvscsi_host_data *)dev_instance;
	vio_disable_interrupts(to_vio_dev(hostdata->dev));
	tasklet_schedule(&hostdata->srp_task);
	return IRQ_HANDLED;
}

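/**
 * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
 * @queue:		crq_queue to release
 * @hostdata:		ibmvscsi_host_data of host
 * @max_requests:	maximum requests (unused)
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */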
static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata,
				       int max_requests)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	free_irq(vdev->irq, (void *)hostdata);
	tasklet_kill(&hostdata->srp_task);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

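/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ queue
 */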
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid != VIOSRP_CRQ_FREE) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * of the rest of the CRQ entry
		 */
		rmb();
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

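/**
 * ibmvscsi_send_crq: - Send a CRQ
 * @hostdata:	the adapter
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 */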
static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
			     u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the VIOS to prevent it from fetching any stale data.
	 */
	mb();
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

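/**
 * ibmvscsi_task: - Process srps asynchronously
 * @data:	ibmvscsi_host_data of host
 */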
static void ibmvscsi_task(void *data)
{
	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = VIOSRP_CRQ_FREE;
			wmb();
		}

		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&hostdata->queue);
		if (crq != NULL) {
			vio_disable_interrupts(vdev);
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = VIOSRP_CRQ_FREE;
			wmb();
		} else {
			done = 1;
		}
	}
}

static void gather_partition_info(void)
{
	const char *ppartition_name;
	const __be32 *p_number_ptr;

	/* Retrieve information about this partition */
	if (!of_root)
		return;

	of_node_get(of_root);

	ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
	if (ppartition_name)
		strlcpy(partition_name, ppartition_name,
				sizeof(partition_name));
	p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
	if (p_number_ptr)
		partition_number = of_read_number(p_number_ptr, 1);
	of_node_put(of_root);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	memset(&hostdata->madapter_info, 0x00,
	       sizeof(hostdata->madapter_info));

	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

	strncpy(hostdata->madapter_info.partition_name, partition_name,
		sizeof(hostdata->madapter_info.partition_name));

	hostdata->madapter_info.partition_number =
					cpu_to_be32(partition_number);

	hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
	hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
}

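/**
 * ibmvscsi_reset_crq_queue() - resets a crq after a failure
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host
 */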
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
				    struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	set_adapter_info(hostdata);

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
	}
	return rc;
}

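/**
 * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
 * @queue:		crq_queue to initialize and register
 * @hostdata:		ibmvscsi_host_data of host
 * @max_requests:	maximum requests (unused)
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */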
static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
				   struct ibmvscsi_host_data *hostdata,
				   int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
					vdev->unit_address,
					queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == H_CLOSED) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
		     (unsigned long)hostdata);

	if (request_irq(vdev->irq,
			ibmvscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

      req_irq_failed:
	tasklet_kill(&hostdata->srp_task);
	rc = 0;
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}

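/**
 * ibmvscsi_reenable_crq_queue() - reenables a crq after a failure
 * @queue:	crq_queue to reenable
 * @hostdata:	ibmvscsi_host_data of host
 */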
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	set_adapter_info(hostdata);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	if (rc)
		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
	return rc;
}

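/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:	event_pool to be initialized
 * @size:	Number of events in pool
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */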
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, GFP_KERNEL);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];

		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
			sizeof(*evt->xfer_iu) * i);
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}

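/**
 * release_event_pool() - Frees memory of an event pool of a host
 * @pool:	event_pool to be released
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 */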
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;

	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		dev_warn(hostdata->dev, "releasing event pool with %d "
			 "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

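/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */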
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;

	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

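/**
 * free_event_struct() - Changes status of event to "free"
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be modified
 */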
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
			"(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
			"which is not in use!\n", evt);
		return;
	}
}

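/**
 * get_event_struct() - Gets the next free event in pool
 * @pool:	event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and marks it "in use".
 * Returns NULL if all events are in use.
 */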
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}

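/**
 * init_event_struct: Initialize fields in an event struct that are always
 *		      required.
 * @evt_struct:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 * @timeout:	timeout value set in the CRQ
 */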
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = cpu_to_be16(timeout);
	evt_struct->done = done;
}

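/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */
/*
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */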
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

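/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd:	srp_cmd whose additional_data member will be unmapped
 * @evt_struct:	the event
 * @dev:	device for which the memory is mapped
 */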
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;

	if (evt_struct->cmnd)
		scsi_dma_unmap(evt_struct->cmnd);
}

static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
		       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;
	u64 total_length = 0;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		struct srp_direct_buf *descr = md + i;

		descr->va = cpu_to_be64(sg_dma_address(sg));
		descr->len = cpu_to_be32(sg_dma_len(sg));
		descr->key = 0;
		total_length += sg_dma_len(sg);
	}
	return total_length;
}

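/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:	struct scsi_cmnd with the scatterlist
 * @evt_struct:	struct srp_event_struct to map
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */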
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{

	int sg_mapped;
	u64 total_length = 0;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = scsi_dma_map(cmd);
	if (!sg_mapped)
		return 1;
	else if (sg_mapped < 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		map_sg_list(cmd, sg_mapped, data);
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = cpu_to_be32(sg_mapped *
					       sizeof(struct srp_direct_buf));
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(cmd, sg_mapped,
					   &indirect->desc_list[0]);
		indirect->len = cpu_to_be32(total_length);
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				sdev_printk(KERN_ERR, cmd->device,
					    "Can't allocate memory "
					    "for indirect table\n");
			scsi_dma_unmap(cmd);
			return 0;
		}
	}

	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

	indirect->len = cpu_to_be32(total_length);
	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
	indirect->table_desc.len = cpu_to_be32(sg_mapped *
					       sizeof(indirect->desc_list[0]));
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
	return 1;
}

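/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @evt_struct:	struct srp_event_struct to map
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called when building an srp cmd from a scsi cmd.
 * Returns 1 on success.
 */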
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		sdev_printk(KERN_ERR, cmd->device,
			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		sdev_printk(KERN_ERR, cmd->device,
			    "Unknown data direction 0x%02x; can't map!\n",
			    cmd->sc_data_direction);
		return 0;
	}

	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}

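/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests
 * @hostdata:	the adapter
 * @error_code:	error code to return as the 'result'
 */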
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *evt;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	while (!list_empty(&hostdata->sent)) {
		evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
		list_del(&evt->list);
		del_timer(&evt->timer);

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		if (evt->cmnd) {
			evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&evt->iu.srp.cmd, evt,
				       evt->hostdata->dev);
			if (evt->cmnd_done)
				evt->cmnd_done(evt->cmnd);
		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
			evt->done(evt);
		free_event_struct(&evt->hostdata->pool, evt);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

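/**
 * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
 * an adapter failure, reset, or SRP Login. Done under host lock to prevent
 * race with SCSI command submission.
 * @hostdata:	adapter to adjust
 * @limit:	new request limit
 */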
static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
{
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	atomic_set(&hostdata->request_limit, limit);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

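/**
 * ibmvscsi_reset_host - Reset the connection to the server
 * @hostdata:	struct ibmvscsi_host_data to reset
 */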
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
	scsi_block_requests(hostdata->host);
	ibmvscsi_set_request_limit(hostdata, 0);

	purge_requests(hostdata, DID_ERROR);
	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
	wake_up(&hostdata->work_wait_q);
}

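/**
 * ibmvscsi_timeout - Internal command timeout handler
 * @t:	struct srp_event_struct that timed out
 *
 * Called when an internally generated command times out
 */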
static void ibmvscsi_timeout(struct timer_list *t)
{
	struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
		evt_struct->iu.srp.cmd.opcode);

	ibmvscsi_reset_host(hostdata);
}

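/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */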
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata,
				   unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
	int request_status = 0;
	int rc;
	int srp_req = 0;

	/* If we have exhausted our request limit, just fail this request,
	 * unless it is for a reset or abort.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		srp_req = 1;
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, we may have run out of requests. */
		/* If request limit was 0 when we started the adapter is in the
		 * process of performing a login with the server adapter, or
		 * we may have run out of requests.
		 */
		else if (request_status == -1 &&
		         evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
			goto send_busy;
		/* Abort and reset calls should make it through.
		 * Nothing except abort and reset should use the last two
		 * slots unless we had two or less to begin with.
		 */
		else if (request_status < 2 &&
		         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
			/* In the case that we have less than two requests
			 * available, check the server limit as a combination
			 * of the request limit and the number of requests
			 * in-flight (the size of the send list).  If the
			 * server limit is greater than 2, return busy so
			 * that the last two are reserved for reset and abort.
			 */
			int server_limit = request_status;
			struct srp_event_struct *tmp_evt;

			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
				server_limit++;
			}

			if (server_limit > 2)
				goto send_busy;
		}
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0);
	if (timeout) {
		evt_struct->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt_struct->timer);
	}

	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
			       be64_to_cpu(crq_as_u64[1]));
	if (rc != 0) {
		list_del(&evt_struct->list);
		del_timer(&evt_struct->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport.  This
		 * will be handled in ibmvscsi_handle_crq()
		 */
		if (rc == H_CLOSED) {
			dev_warn(hostdata->dev, "send warning. "
			         "Receive queue closed, will retry.\n");
			goto send_busy;
		}
		dev_err(hostdata->dev, "send error %d\n", rc);
		if (srp_req)
			atomic_inc(&hostdata->request_limit);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	if (srp_req && request_status != -1)
		atomic_inc(&hostdata->request_limit);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}

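/**
 * handle_cmd_rsp: -  Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */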
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			dev_warn(evt_struct->hostdata->dev,
				 "bad SRP RSP type %#02x\n", rsp->opcode);
	}

	if (cmnd) {
		cmnd->result |= rsp->status;
		if (scsi_status_is_check_condition(cmnd->result))
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       be32_to_cpu(rsp->sense_data_len));
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			scsi_set_resid(cmnd,
				       be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

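/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 */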
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

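/**
 * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi queue
 * @cmnd:	struct scsi_cmnd to be executed
 */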
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	cmnd->result = (DID_OK << 16);
	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN);
	memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp));
	srp_cmd = &evt_struct->iu.srp.cmd;
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
	int_to_scsilun(lun, &srp_cmd->lun);

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			sdev_printk(KERN_ERR, cmnd->device,
				    "couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  scsi_cmd_to_rq(cmnd)->timeout / HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va =
			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list));
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}

static DEF_SCSI_QCMD(ibmvscsi_queuecommand)

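/* ------------------------------------------------------------
 * Routines for driver initialization
 */

/**
 * map_persist_bufs: - Pre-map persistent data for adapter logins
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
 * Returns 1 on error, 0 on success.
 */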
static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
{

	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
					     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
		dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
		return 1;
	}

	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
						     &hostdata->madapter_info,
						     sizeof(hostdata->madapter_info),
						     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
		dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
		return 1;
	}

	return 0;
}

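/**
 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
 * @hostdata:	ibmvscsi_host_data of host
 */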
static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
{
	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);

	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
}

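/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */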
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
			 evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
		ibmvscsi_set_request_limit(hostdata, -1);
		return;
	default:
		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
			evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		ibmvscsi_set_request_limit(hostdata, -1);
		return;
	}

	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
	hostdata->client_migrated = 0;

	/* Now we know what the real request-limit is.
	 * This value is set rather than added to request_limit because
	 * request_limit could have been set to -1 by this client.
	 */
	ibmvscsi_set_request_limit(hostdata,
		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));

	/* If we had any pending I/Os, kick them */
	hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
	wake_up(&hostdata->work_wait_q);
}

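/**
 * send_srp_login: - Sends the srp login
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */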
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);

	BUG_ON(!evt_struct);
	init_event_struct(evt_struct, login_rsp,
			  VIOSRP_SRP_FORMAT, login_timeout);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0, sizeof(*login));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					 SRP_BUF_FORMAT_INDIRECT);

	/* Start out with a request limit of 0, since this is negotiated in
	 * the login request we are just sending and login requests always
	 * get sent by the driver regardless of request_limit.
	 */
	ibmvscsi_set_request_limit(hostdata, 0);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	dev_info(hostdata->dev, "sent SRP login\n");
	return rc;
}

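/**
 * capabilities_rsp: - Handle response to MAD adapter capabilities request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending the capabilities request.
 */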
static void capabilities_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	if (evt_struct->xfer_iu->mad.capabilities.common.status) {
		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
			evt_struct->xfer_iu->mad.capabilities.common.status);
	} else {
		if (hostdata->caps.migration.common.server_support !=
		    cpu_to_be16(SERVER_SUPPORTS_CAP))
			dev_info(hostdata->dev, "Partition migration not supported\n");

		if (client_reserve) {
			if (hostdata->caps.reserve.common.server_support ==
			    cpu_to_be16(SERVER_SUPPORTS_CAP))
				dev_info(hostdata->dev, "Client reserve enabled\n");
			else
				dev_info(hostdata->dev, "Client reserve not supported\n");
		}
	}

	send_srp_login(hostdata);
}

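/**
 * send_mad_capabilities: - Sends the MAD capabilities request
 *	and stores the result so it can be retrieved with sysfs.
 * @hostdata:	ibmvscsi_host_data of host
 */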
static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_capabilities *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;
	struct device_node *of_node = hostdata->dev->of_node;
	const char *location;

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct, capabilities_rsp,
			  VIOSRP_MAD_FORMAT, info_timeout);

	req = &evt_struct->iu.mad.capabilities;
	memset(req, 0, sizeof(*req));

	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
	if (hostdata->client_migrated)
		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);

	strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
		sizeof(hostdata->caps.name));

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(hostdata->dev);
	strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));

	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
	req->buffer = cpu_to_be64(hostdata->caps_addr);

	hostdata->caps.migration.common.cap_type =
				cpu_to_be32(MIGRATION_CAPABILITIES);
	hostdata->caps.migration.common.length =
				cpu_to_be16(sizeof(hostdata->caps.migration));
	hostdata->caps.migration.common.server_support =
				cpu_to_be16(SERVER_SUPPORTS_CAP);
	hostdata->caps.migration.ecl = cpu_to_be32(1);

	if (client_reserve) {
		hostdata->caps.reserve.common.cap_type =
					cpu_to_be32(RESERVATION_CAPABILITIES);
		hostdata->caps.reserve.common.length =
				cpu_to_be16(sizeof(hostdata->caps.reserve));
		hostdata->caps.reserve.common.server_support =
				cpu_to_be16(SERVER_SUPPORTS_CAP);
		hostdata->caps.reserve.type =
				cpu_to_be32(CLIENT_RESERVE_SCSI_2);
		req->common.length =
				cpu_to_be16(sizeof(hostdata->caps));
	} else
		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
						sizeof(hostdata->caps.reserve));

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
		dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

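/**
 * fast_fail_rsp: - Handle response to MAD enable fast fail
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending enable fast fail. Gets called
 * by ibmvscsi_handle_crq()
 */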
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);

	if (status == VIOSRP_MAD_NOT_SUPPORTED)
		dev_err(hostdata->dev, "fast_fail not supported in server\n");
	else if (status == VIOSRP_MAD_FAILED)
		dev_err(hostdata->dev, "fast_fail request failed\n");
	else if (status != VIOSRP_MAD_SUCCESS)
		dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);

	send_mad_capabilities(hostdata);
}

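/**
 * enable_fast_fail() - Attempt to enable fast fail on the server
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Falls through to the capabilities request if fast_fail is disabled.
 * Returns zero if successful.
 */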
static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct viosrp_fast_fail *fast_fail_mad;
	struct srp_event_struct *evt_struct;

	if (!fast_fail) {
		send_mad_capabilities(hostdata);
		return 0;
	}

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);

	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return rc;
}

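/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */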
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		dev_err(hostdata->dev, "error %d getting adapter info\n",
			evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		dev_info(hostdata->dev, "host srp version: %s, "
			 "host partition %s (%d), OS %d, max io %u\n",
			 hostdata->madapter_info.srp_version,
			 hostdata->madapter_info.partition_name,
			 be32_to_cpu(hostdata->madapter_info.partition_number),
			 be32_to_cpu(hostdata->madapter_info.os_type),
			 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;

		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
				hostdata->madapter_info.srp_version);
			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
				MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}

		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
			enable_fast_fail(hostdata);
			return;
		}
	}

	send_srp_login(hostdata);
}

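/**
 * send_mad_adapter_info: - Sends the MAD adapter info request
 *	and stores the result so it can be retrieved with sysfs.
 * @hostdata:	ibmvscsi_host_data of host
 */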
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	unsigned long flags;

	evt_struct = get_event_struct(&hostdata->pool);
	BUG_ON(!evt_struct);

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  info_timeout);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

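/**
 * init_adapter() - Start virtual adapter initialization sequence
 * @hostdata:	ibmvscsi_host_data of host
 */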
static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
	send_mad_adapter_info(hostdata);
}

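/*
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */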
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}

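/**
 * ibmvscsi_eh_abort_handler: - Abort a command
 * @cmd:	struct scsi_cmnd to be aborted
 *
 * Sends an SRP abort task management request to the server and waits
 * synchronously for the response.
 * Returns SUCCESS if the command was aborted, FAILED otherwise.
 */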
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		found_evt = NULL;
		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
			if (tmp_evt->cmnd == cmd) {
				found_evt = tmp_evt;
				break;
			}
		}

		if (!found_evt) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			return SUCCESS;
		}

		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				"failed to allocate abort event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  abort_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up an abort SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		int_to_scsilun(lun, &tsk_mgmt->lun);
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
		tsk_mgmt->task_tag = (u64) found_evt;

		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send abort() event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device,
		    "aborting command. lun 0x%llx, tag 0x%llx\n",
		    (((u64) lun) << 48), (u64) found_evt);

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "abort code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
			    tsk_mgmt->task_tag);
		return SUCCESS;
	}

	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
		    tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}

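/**
 * ibmvscsi_eh_device_reset_handler: - Reset a single LUN
 * @cmd:	struct scsi_cmnd representing a command on the LUN to reset
 *
 * Sends an SRP LUN reset to the server and, once it completes, fails all
 * outstanding commands for that device with DID_RESET.
 * Returns SUCCESS if the device was reset, FAILED otherwise.
 */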
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);
	unsigned long wait_switch = 0;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		evt = get_event_struct(&hostdata->pool);
		if (evt == NULL) {
			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
			sdev_printk(KERN_ERR, cmd->device,
				"failed to allocate reset event\n");
			return FAILED;
		}

		init_event_struct(evt,
				  sync_completion,
				  VIOSRP_SRP_FORMAT,
				  reset_timeout);

		tsk_mgmt = &evt->iu.srp.tsk_mgmt;

		/* Set up a lun reset SRP command */
		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
		tsk_mgmt->opcode = SRP_TSK_MGMT;
		int_to_scsilun(lun, &tsk_mgmt->lun);
		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

		evt->sync_srp = &srp_rsp;

		init_completion(&evt->comp);
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);

		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));

	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, cmd->device,
			    "failed to send reset event. rc=%d\n", rsp_rc);
		return FAILED;
	}

	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
		    (((u64) lun) << 48));

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
				    srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			sdev_printk(KERN_WARNING, cmd->device,
				    "reset code %d for task tag 0x%llx\n",
				    rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			if (tmp_evt->cmnd)
				tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}

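/**
 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
 * @cmd:	struct scsi_cmnd from the eh thread
 *
 * Resets the CRQ connection and waits for the request limit to become
 * valid again before declaring success.
 */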
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long wait_switch = 0;
	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);

	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

	ibmvscsi_reset_host(hostdata);

	for (wait_switch = jiffies + (init_timeout * HZ);
	     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

		msleep(10);
	}

	if (atomic_read(&hostdata->request_limit) <= 0)
		return FAILED;

	return SUCCESS;
}

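/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @hostdata:	ibmvscsi_host_data of host
 */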
static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
				struct ibmvscsi_host_data *hostdata)
{
	long rc;
	unsigned long flags;
	/* The hypervisor copies our tag value here so no byteswapping */
	struct srp_event_struct *evt_struct =
			(__force struct srp_event_struct *)crq->IU_data_ptr;
	switch (crq->valid) {
	case VIOSRP_CRQ_INIT_RSP:		/* initialization */
		switch (crq->format) {
		case VIOSRP_CRQ_INIT:	/* Initialization message */
			dev_info(hostdata->dev, "partner initialized\n");
			/* Send back a response */
			rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
			if (rc == 0) {
				/* Now login */
				init_adapter(hostdata);
			} else {
				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
			}

			break;
		case VIOSRP_CRQ_INIT_COMPLETE:	/* Initialization response */
			dev_info(hostdata->dev, "partner initialization complete\n");

			/* Now login */
			init_adapter(hostdata);
			break;
		default:
			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
		}
		return;
	case VIOSRP_CRQ_XPORT_EVENT:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		ibmvscsi_set_request_limit(hostdata, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			dev_info(hostdata->dev, "Re-enabling adapter!\n");
			hostdata->client_migrated = 1;
			hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
			purge_requests(hostdata, DID_REQUEUE);
			wake_up(&hostdata->work_wait_q);
		} else {
			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
				crq->format);
			ibmvscsi_reset_host(hostdata);
		}
		return;
	case VIOSRP_CRQ_CMD_RSP:	/* real payload */
		break;
	default:
		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
			crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
			evt_struct);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
			evt_struct);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
			   &hostdata->request_limit);

	del_timer(&evt_struct->timer);

	if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
		evt_struct->cmnd->result = DID_ERROR << 16;
	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

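/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for disks
 * @sdev:	struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk.  Adjust the
 * request_queue's timeout as well.
 */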
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (sdev->type == TYPE_DISK) {
		sdev->allow_restart = 1;
		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

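/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 */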
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
	return scsi_change_queue_depth(sdev, qdepth);
}

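/* ------------------------------------------------------------
 * sysfs attributes
 */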
static ssize_t show_host_vhost_loc(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
		       hostdata->caps.loc);
	return len;
}

static struct device_attribute ibmvscsi_host_vhost_loc = {
	.attr = {
		 .name = "vhost_loc",
		 .mode = S_IRUGO,
		 },
	.show = show_host_vhost_loc,
};

static ssize_t show_host_vhost_name(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
		       hostdata->caps.name);
	return len;
}

static struct device_attribute ibmvscsi_host_vhost_name = {
	.attr = {
		 .name = "vhost_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_vhost_name,
};

static ssize_t show_host_srp_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       be32_to_cpu(hostdata->madapter_info.partition_number));
	return len;
}

static struct device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       be32_to_cpu(hostdata->madapter_info.mad_version));
	return len;
}

static struct device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       be32_to_cpu(hostdata->madapter_info.os_type));
	return len;
}

static struct device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* No host configuration data is reported; the attribute is
	 * retained for compatibility.
	 */
	return 0;
}

static struct device_attribute ibmvscsi_host_config = {
	.attr = {
		.name = "config",
		.mode = S_IRUGO,
		},
	.show = show_host_config,
};

static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct ibmvscsi_host_data *hostdata = shost_priv(shost);

	dev_info(hostdata->dev, "Initiating adapter reset!\n");
	ibmvscsi_reset_host(hostdata);

	return 0;
}

static struct attribute *ibmvscsi_host_attrs[] = {
	&ibmvscsi_host_vhost_loc.attr,
	&ibmvscsi_host_vhost_name.attr,
	&ibmvscsi_host_srp_version.attr,
	&ibmvscsi_host_partition_name.attr,
	&ibmvscsi_host_partition_number.attr,
	&ibmvscsi_host_mad_version.attr,
	&ibmvscsi_host_os_type.attr,
	&ibmvscsi_host_config.attr,
	NULL
};

ATTRIBUTE_GROUPS(ibmvscsi_host);

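/* ------------------------------------------------------------
 * SCSI driver registration
 */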
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_timed_out = srp_timed_out,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
	.slave_configure = ibmvscsi_slave_configure,
	.change_queue_depth = ibmvscsi_change_queue_depth,
	.host_reset = ibmvscsi_host_reset,
	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.shost_groups = ibmvscsi_host_groups,
};

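/**
 * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
 * @vdev:	struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */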
static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
{
	/* iu_storage data allocated in initialize_event_pool */
	unsigned long desired_io = max_events * sizeof(union viosrp_iu);

	/* add io space for sg data */
	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
			     IBMVSCSI_CMDS_PER_LUN_DEFAULT);

	return desired_io;
}

static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
{
	unsigned long flags;
	int rc;
	char *action = "reset";

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	switch (hostdata->action) {
	case IBMVSCSI_HOST_ACTION_UNBLOCK:
		rc = 0;
		break;
	case IBMVSCSI_HOST_ACTION_RESET:
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
		if (!rc)
			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
		vio_enable_interrupts(to_vio_dev(hostdata->dev));
		break;
	case IBMVSCSI_HOST_ACTION_REENABLE:
		action = "enable";
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
		if (!rc)
			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
		break;
	case IBMVSCSI_HOST_ACTION_NONE:
	default:
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		return;
	}

	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rc) {
		ibmvscsi_set_request_limit(hostdata, -1);
		dev_err(hostdata->dev, "error after %s\n", action);
	}

	scsi_unblock_requests(hostdata->host);
}

static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
{
	if (kthread_should_stop())
		return 1;
	switch (hostdata->action) {
	case IBMVSCSI_HOST_ACTION_NONE:
		return 0;
	case IBMVSCSI_HOST_ACTION_RESET:
	case IBMVSCSI_HOST_ACTION_REENABLE:
	case IBMVSCSI_HOST_ACTION_UNBLOCK:
	default:
		break;
	}

	return 1;
}

static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	rc = __ibmvscsi_work_to_do(hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	return rc;
}

static int ibmvscsi_work(void *data)
{
	struct ibmvscsi_host_data *hostdata = data;
	int rc;

	set_user_nice(current, MIN_NICE);

	while (1) {
		rc = wait_event_interruptible(hostdata->work_wait_q,
					      ibmvscsi_work_to_do(hostdata));

		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		ibmvscsi_do_work(hostdata);
	}

	return 0;
}

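/*
 * Called by bus code for each adapter
 */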
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;
	unsigned long wait_switch = 0;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	host->transportt = ibmvscsi_transport_template;
	hostdata = shost_priv(host);
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	init_waitqueue_head(&hostdata->work_wait_q);
	hostdata->host = host;
	hostdata->dev = dev;
	ibmvscsi_set_request_limit(hostdata, -1);
	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

	if (map_persist_bufs(hostdata)) {
		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
		goto persist_bufs_failed;
	}

	hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
					    "ibmvscsi", host->host_no);

	if (IS_ERR(hostdata->work_thread)) {
		dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
			PTR_ERR(hostdata->work_thread));
		goto init_crq_failed;
	}

	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}
	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = IBMVSCSI_MAX_LUN;
	host->max_id = max_id;
	host->max_channel = max_channel;
	host->max_cmd_len = 16;

	dev_info(dev,
		 "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n",
		 host->max_id, host->max_lun, host->max_channel);

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* we don't have a proper target_port_id so let's use the fake one */
	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
	       sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(host, &ids);
	if (IS_ERR(rport))
		goto add_srp_port_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * want to scan
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	dev_set_drvdata(&vdev->dev, hostdata);
	spin_lock(&ibmvscsi_driver_lock);
	list_add_tail(&hostdata->host_list, &ibmvscsi_head);
	spin_unlock(&ibmvscsi_driver_lock);
	return 0;

      add_srp_port_failed:
	scsi_remove_host(hostdata->host);
      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
      kill_kthread:
	kthread_stop(hostdata->work_thread);
      init_crq_failed:
	unmap_persist_bufs(hostdata);
      persist_bufs_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

static void ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);

	srp_remove_host(hostdata->host);
	scsi_remove_host(hostdata->host);

	purge_requests(hostdata, DID_ERROR);
	release_event_pool(&hostdata->pool, hostdata);

	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
				   max_events);

	kthread_stop(hostdata->work_thread);
	unmap_persist_bufs(hostdata);

	spin_lock(&ibmvscsi_driver_lock);
	list_del(&hostdata->host_list);
	spin_unlock(&ibmvscsi_driver_lock);

	scsi_host_put(hostdata->host);
}

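/**
 * ibmvscsi_resume: Resume from suspend
 * @dev:	device struct
 *
 * We may have lost an interrupt across suspend/resume, so kick the
 * interrupt handler
 */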
static int ibmvscsi_resume(struct device *dev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);

	vio_disable_interrupts(to_vio_dev(hostdata->dev));
	tasklet_schedule(&hostdata->srp_task);

	return 0;
}

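/*
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */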
static const struct vio_device_id ibmvscsi_device_table[] = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static const struct dev_pm_ops ibmvscsi_pm_ops = {
	.resume = ibmvscsi_resume
};

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.get_desired_dma = ibmvscsi_get_desired_dma,
	.name = "ibmvscsi",
	.pm = &ibmvscsi_pm_ops,
};

static struct srp_function_template ibmvscsi_transport_functions = {
};

static int __init ibmvscsi_module_init(void)
{
	int ret;

	/* Ensure we have two requests to do error recovery */
	driver_template.can_queue = max_requests;
	max_events = max_requests + 2;

	if (!firmware_has_feature(FW_FEATURE_VIO))
		return -ENODEV;

	ibmvscsi_transport_template =
		srp_attach_transport(&ibmvscsi_transport_functions);
	if (!ibmvscsi_transport_template)
		return -ENOMEM;

	ret = vio_register_driver(&ibmvscsi_driver);
	if (ret)
		srp_release_transport(ibmvscsi_transport_template);
	return ret;
}

static void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
	srp_release_transport(ibmvscsi_transport_template);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);