/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
			      const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;

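/*
 * sysfs read callback for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters. Negative values are reported as "off".
 */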
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

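/*
 * Allocate an information unit (IU) and DMA-map its data buffer for
 * transfers in @direction.
 */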
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

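/*
 * Move a newly created IB queue pair into the INIT state and set its
 * P_Key index, access flags and port number.
 */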
static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

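/* Create a new IB CM ID for @ch and initialize the associated path record. */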
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/**
 * srp_free_ch_ib() - release the resources owned by an RDMA channel
 * @target: SRP target port.
 * @ch: SRP RDMA channel.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

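/* Completion callback for the ib_sa_path_rec_get() path record query. */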
static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						     target->srp_host->srp_dev->dev,
						     target->srp_host->port,
						     &ch->ib_cm.path,
						     IB_SA_PATH_REC_SERVICE_ID |
						     IB_SA_PATH_REC_DGID |
						     IB_SA_PATH_REC_SGID |
						     IB_SA_PATH_REC_NUMB_PATH |
						     IB_SA_PATH_REC_PKEY,
						     SRP_PATH_REC_TIMEOUT_MS,
						     GFP_KERNEL,
						     srp_path_rec_completion,
						     ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

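/*
 * Query the subnet timeout of the local port. Small values may cause SRP
 * login attempts to time out, hence the warning below.
 */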
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}

static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags = req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
	 * In the published SRP specification (section 8.1), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi, &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi, &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi, &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi, &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);

	kfree(req->fr_list);
	if (req->indirect_dma_addr) {
		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
				    target->indirect_size,
				    DMA_TO_DEVICE);
	}
	kfree(req->indirect_desc);

	return 0;
}

static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);
	dma_addr_t dma_addr;
	int ret = -ENOMEM;

	if (srp_dev->use_fast_reg) {
		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
					     GFP_KERNEL);
		if (!req->fr_list)
			goto out;
	}
	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
	if (!req->indirect_desc)
		goto out;

	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
				     target->indirect_size,
				     DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr)) {
		srp_exit_cmd_priv(shost, cmd);
		goto out;
	}

	req->indirect_dma_addr = dma_addr;
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	const struct attribute_group **g;
	struct attribute **attr;

	for (g = shost->hostt->shost_groups; *g; ++g) {
		for (attr = (*g)->attrs; *attr; ++attr) {
			struct device_attribute *dev_attr =
				container_of(*attr, typeof(*dev_attr), attr);

			device_remove_file(&shost->shost_dev, dev_attr);
		}
	}
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

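/* Completion callback for rkey invalidation work requests. */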
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
			u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode = IB_WR_LOCAL_INV,
		.next = NULL,
		.num_sge = 0,
		.send_flags = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, NULL);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scsi_done(scmnd);
	}
}

struct srp_terminate_context {
	struct srp_target_port *srp_target;
	int scsi_result;
};

static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
	struct srp_terminate_context *context = context_ptr;
	struct srp_target_port *target = context->srp_target;
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	struct srp_request *req = scsi_cmd_priv(scmnd);

	srp_finish_req(ch, req, NULL, context->scsi_result);

	return true;
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_terminate_context context = { .srp_target = target,
		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };

	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}

/* Calculate the maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{
	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
		sizeof(struct srp_indirect_buf) +
		cmd_sg_cnt * sizeof(struct srp_direct_buf);

	if (use_imm_data)
		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
				 srp_max_imm_data);

	if (max_it_iu_size)
		max_iu_len = min(max_iu_len, max_it_iu_size);

	pr_debug("max_iu_len = %d\n", max_iu_len);

	return max_iu_len;
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						srp_use_imm_data,
						target->max_it_iu_size);
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	{
		struct srp_terminate_context context = {
			.srp_target = target, .scsi_result = DID_RESET << 16};

		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
				    &context);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

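/* Append one memory descriptor to the descriptor table being built. */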
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_rkey) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     target->global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, NULL);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

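/*
 * Verify that the mapped descriptor lengths match the SCSI buffer length
 * and that the registered MR lengths do not exceed it.
 */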
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}

/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * return value.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u64 data_len;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	req->cmd->num_sge = 1;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof(struct srp_cmd) + cmd->add_cdb_len;

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat = scsi_sglist(scmnd);
	data_len = scsi_bufflen(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	if (ch->use_imm_data &&
	    count <= ch->max_imm_sge &&
	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
		struct srp_imm_buf *buf;
		struct ib_sge *sge = &req->cmd->sge[1];

		fmt = SRP_DATA_DESC_IMM;
		len = SRP_IMM_DATA_OFFSET;
		req->nmdesc = 0;
		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->len = cpu_to_be32(data_len);
		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
		for_each_sg(scat, sg, count, i) {
			sge[i].addr = sg_dma_address(sg);
			sge[i].length = sg_dma_len(sg);
			sge[i].lkey = target->lkey;
		}
		req->cmd->num_sge += count;
		goto map_complete;
	}

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_direct_buf);

	if (count == 1 && target->global_rkey) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->va = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->global_rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_rkey) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_WR_FLUSH_ERR then it will be called both with and
 * without ch->lock being held.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}

/**
 * srp_post_send() - send an SRP information unit
 * @ch: RDMA channel over which to send the information unit.
 * @iu: Information unit to send.
 * @len: Length of the information unit excluding immediate data.
 */
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_send_wr wr;

	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
		return -EINVAL;

	iu->sge[0].addr = iu->dma;
	iu->sge[0].length = len;
	iu->sge[0].lkey = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next = NULL;
	wr.wr_cqe = &iu->cqe;
	wr.sg_list = &iu->sge[0];
	wr.num_sge = iu->num_sge;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, NULL);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr;
	struct ib_sge list;

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next = NULL;
	wr.wr_cqe = &iu->cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, NULL);
}

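/* Process a received SRP_RSP information unit. */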
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = scsi_cmd_priv(scmnd);
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	iu->num_sge = 1;
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
2135 static void srp_tl_err_work(struct work_struct *work)
2136 {
2137 struct srp_target_port *target;
2138
2139 target = container_of(work, struct srp_target_port, tl_err_work);
2140 if (target->rport)
2141 srp_start_tl_fail_timers(target->rport);
2142 }
2143
2144 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2145 const char *opname)
2146 {
2147 struct srp_rdma_ch *ch = cq->cq_context;
2148 struct srp_target_port *target = ch->target;
2149
2150 if (ch->connected && !target->qp_in_error) {
2151 shost_printk(KERN_ERR, target->scsi_host,
2152 PFX "failed %s status %s (%d) for CQE %p\n",
2153 opname, ib_wc_status_msg(wc->status), wc->status,
2154 wc->wr_cqe);
2155 queue_work(system_long_wq, &target->tl_err_work);
2156 }
2157 target->qp_in_error = true;
2158 }
2159
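/*
 * Build an SRP_CMD information unit for @scmnd, map its data buffer and
 * post the IU on the send queue of the channel that owns the blk-mq
 * hardware queue of the request. Returns 0 when the command has been
 * queued or completed and SCSI_MLQUEUE_HOST_BUSY when no tx IU is
 * available and the command must be retried later.
 */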
2160 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2161 {
2162 struct request *rq = scsi_cmd_to_rq(scmnd);
2163 struct srp_target_port *target = host_to_target(shost);
2164 struct srp_rdma_ch *ch;
2165 struct srp_request *req = scsi_cmd_priv(scmnd);
2166 struct srp_iu *iu;
2167 struct srp_cmd *cmd;
2168 struct ib_device *dev;
2169 unsigned long flags;
2170 u32 tag;
2171 int len, ret;
2172
2173 scmnd->result = srp_chkready(target->rport);
2174 if (unlikely(scmnd->result))
2175 goto err;
2176
2177 WARN_ON_ONCE(rq->tag < 0);
2178 tag = blk_mq_unique_tag(rq);
2179 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2180
2181 spin_lock_irqsave(&ch->lock, flags);
2182 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2183 spin_unlock_irqrestore(&ch->lock, flags);
2184
2185 if (!iu)
2186 goto err;
2187
2188 dev = target->srp_host->srp_dev->dev;
2189 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2190 DMA_TO_DEVICE);
2191
2192 cmd = iu->buf;
2193 memset(cmd, 0, sizeof *cmd);
2194
2195 cmd->opcode = SRP_CMD;
2196 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2197 cmd->tag = tag;
2198 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2199 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2200 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2201 4);
2202 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2203 goto err_iu;
2204 }
2205
2206 req->scmnd = scmnd;
2207 req->cmd = iu;
2208
2209 len = srp_map_data(scmnd, ch, req);
2210 if (len < 0) {
2211 shost_printk(KERN_ERR, target->scsi_host,
2212 PFX "Failed to map data (%d)\n", len);
2213 /*
2214  * If we ran out of memory descriptors (-ENOMEM) because an
2215  * application is queuing many requests with more than
2216  * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2217  * to reduce queue depth temporarily.
2218  */
2219 scmnd->result = len == -ENOMEM ?
2220 DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2221 goto err_iu;
2222 }
2223
2224 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2225 DMA_TO_DEVICE);
2226
2227 if (srp_post_send(ch, iu, len)) {
2228 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2229 scmnd->result = DID_ERROR << 16;
2230 goto err_unmap;
2231 }
2232
2233 return 0;
2234
2235 err_unmap:
2236 srp_unmap_data(scmnd, ch, req);
2237
2238 err_iu:
2239 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2240
2241 /*
2242  * Avoid that the loops that iterate over the request ring can
2243  * encounter a dangling SCSI command pointer.
2244  */
2245 req->scmnd = NULL;
2246
2247 err:
2248 if (scmnd->result) {
2249 scsi_done(scmnd);
2250 ret = 0;
2251 } else {
2252 ret = SCSI_MLQUEUE_HOST_BUSY;
2253 }
2254
2255 return ret;
2256 }
2257
2258 /*
2259  * Note: the resources allocated in this function are freed in
2260  * srp_free_ch_ib().
2261  */
2262 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2263 {
2264 struct srp_target_port *target = ch->target;
2265 int i;
2266
2267 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2268 GFP_KERNEL);
2269 if (!ch->rx_ring)
2270 goto err_no_ring;
2271 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2272 GFP_KERNEL);
2273 if (!ch->tx_ring)
2274 goto err_no_ring;
2275
2276 for (i = 0; i < target->queue_size; ++i) {
2277 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2278 ch->max_ti_iu_len,
2279 GFP_KERNEL, DMA_FROM_DEVICE);
2280 if (!ch->rx_ring[i])
2281 goto err;
2282 }
2283
2284 for (i = 0; i < target->queue_size; ++i) {
2285 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2286 ch->max_it_iu_len,
2287 GFP_KERNEL, DMA_TO_DEVICE);
2288 if (!ch->tx_ring[i])
2289 goto err;
2290
2291 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2292 }
2293
2294 return 0;
2295
2296 err:
2297 for (i = 0; i < target->queue_size; ++i) {
2298 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2299 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2300 }
2301
2302
2303 err_no_ring:
2304 kfree(ch->tx_ring);
2305 ch->tx_ring = NULL;
2306 kfree(ch->rx_ring);
2307 ch->rx_ring = NULL;
2308
2309 return -ENOMEM;
2310 }
2311
2312 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2313 {
2314 uint64_t T_tr_ns, max_compl_time_ms;
2315 uint32_t rq_tmo_jiffies;
2316
2317 /*
2318  * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2319  * table 91), both the QP timeout and the retry count have to be set
2320  * for RC QP's during the RTR to RTS transition.
2321  */
2322 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2323 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2324
2325 /*
2326  * Set target->rq_tmo_jiffies to one second more than the largest time
2327  * it can take before an error completion is generated. See also
2328  * C9-140..142 in the IBTA spec for more information about how to
2329  * convert the QP Local ACK Timeout value to nanoseconds.
2330  */
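/*
 * Worked example, assuming qp_attr->timeout = 18 and
 * qp_attr->retry_cnt = 7: T_tr_ns = 4096 * 2^18 ns = 2^30 ns ~= 1.07 s,
 * max_compl_time_ms = 7 * 4 * 2^30 ns / 10^6 = 30064 ms and hence
 * rq_tmo_jiffies = msecs_to_jiffies(31064), roughly 31 seconds.
 */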
2331 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2332 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2333 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2334 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2335
2336 return rq_tmo_jiffies;
2337 }
2338
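/*
 * Process a connection REP: parse the SRP login response, allocate and
 * post the receive ring and, when the IB CM is used, transition the QP
 * through RTR and RTS before sending the RTU. The result is stored in
 * ch->status for the caller that waits on ch->done.
 */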
2339 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2340 const struct srp_login_rsp *lrsp,
2341 struct srp_rdma_ch *ch)
2342 {
2343 struct srp_target_port *target = ch->target;
2344 struct ib_qp_attr *qp_attr = NULL;
2345 int attr_mask = 0;
2346 int ret = 0;
2347 int i;
2348
2349 if (lrsp->opcode == SRP_LOGIN_RSP) {
2350 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2351 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2352 ch->use_imm_data = srp_use_imm_data &&
2353 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2354 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2355 ch->use_imm_data,
2356 target->max_it_iu_size);
2357 WARN_ON_ONCE(ch->max_it_iu_len >
2358 be32_to_cpu(lrsp->max_it_iu_len));
2359
2360 if (ch->use_imm_data)
2361 shost_printk(KERN_DEBUG, target->scsi_host,
2362 PFX "using immediate data\n");
2363
2364 /*
2365  * Reserve credits for task management so we don't
2366  * bounce requests back to the SCSI mid-layer.
2367  */
2368 target->scsi_host->can_queue
2369 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2370 target->scsi_host->can_queue);
2371 target->scsi_host->cmd_per_lun
2372 = min_t(int, target->scsi_host->can_queue,
2373 target->scsi_host->cmd_per_lun);
2374 } else {
2375 shost_printk(KERN_WARNING, target->scsi_host,
2376 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2377 ret = -ECONNRESET;
2378 goto error;
2379 }
2380
2381 if (!ch->rx_ring) {
2382 ret = srp_alloc_iu_bufs(ch);
2383 if (ret)
2384 goto error;
2385 }
2386
2387 for (i = 0; i < target->queue_size; i++) {
2388 struct srp_iu *iu = ch->rx_ring[i];
2389
2390 ret = srp_post_recv(ch, iu);
2391 if (ret)
2392 goto error;
2393 }
2394
2395 if (!target->using_rdma_cm) {
2396 ret = -ENOMEM;
2397 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2398 if (!qp_attr)
2399 goto error;
2400
2401 qp_attr->qp_state = IB_QPS_RTR;
2402 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2403 if (ret)
2404 goto error_free;
2405
2406 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2407 if (ret)
2408 goto error_free;
2409
2410 qp_attr->qp_state = IB_QPS_RTS;
2411 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2412 if (ret)
2413 goto error_free;
2414
2415 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2416
2417 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2418 if (ret)
2419 goto error_free;
2420
2421 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2422 }
2423
2424 error_free:
2425 kfree(qp_attr);
2426
2427 error:
2428 ch->status = ret;
2429 }
2430
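/*
 * Map an IB CM REJ to a channel status: redirects and stale connections
 * are reported as positive SRP_* codes so that the connect path can
 * retry, while most other reject reasons become -ECONNRESET.
 */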
2431 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2432 const struct ib_cm_event *event,
2433 struct srp_rdma_ch *ch)
2434 {
2435 struct srp_target_port *target = ch->target;
2436 struct Scsi_Host *shost = target->scsi_host;
2437 struct ib_class_port_info *cpi;
2438 int opcode;
2439 u16 dlid;
2440
2441 switch (event->param.rej_rcvd.reason) {
2442 case IB_CM_REJ_PORT_CM_REDIRECT:
2443 cpi = event->param.rej_rcvd.ari;
2444 dlid = be16_to_cpu(cpi->redirect_lid);
2445 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2446 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2447 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2448 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2449
2450 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2451 break;
2452
2453 case IB_CM_REJ_PORT_REDIRECT:
2454 if (srp_target_is_topspin(target)) {
2455 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2456 /*
2457  * Topspin/Cisco SRP gateways put the GID of the target port
2458  * in the REJ ARI instead of a class port info structure.
2459  * Copy that GID into the path record so that the connect
2460  * attempt can be retried against the redirected port.
2461  */
2462 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2463
2464 shost_printk(KERN_DEBUG, shost,
2465 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2466 be64_to_cpu(dgid->global.subnet_prefix),
2467 be64_to_cpu(dgid->global.interface_id));
2468
2469 ch->status = SRP_PORT_REDIRECT;
2470 } else {
2471 shost_printk(KERN_WARNING, shost,
2472 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2473 ch->status = -ECONNRESET;
2474 }
2475 break;
2476
2477 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2478 shost_printk(KERN_WARNING, shost,
2479 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2480 ch->status = -ECONNRESET;
2481 break;
2482
2483 case IB_CM_REJ_CONSUMER_DEFINED:
2484 opcode = *(u8 *) event->private_data;
2485 if (opcode == SRP_LOGIN_REJ) {
2486 struct srp_login_rej *rej = event->private_data;
2487 u32 reason = be32_to_cpu(rej->reason);
2488
2489 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2490 shost_printk(KERN_WARNING, shost,
2491 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2492 else
2493 shost_printk(KERN_WARNING, shost, PFX
2494 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2495 target->sgid.raw,
2496 target->ib_cm.orig_dgid.raw,
2497 reason);
2498 } else
2499 shost_printk(KERN_WARNING, shost,
2500 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2501 " opcode 0x%02x\n", opcode);
2502 ch->status = -ECONNRESET;
2503 break;
2504
2505 case IB_CM_REJ_STALE_CONN:
2506 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2507 ch->status = SRP_STALE_CONN;
2508 break;
2509
2510 default:
2511 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2512 event->param.rej_rcvd.reason);
2513 ch->status = -ECONNRESET;
2514 }
2515 }
2516
2517 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2518 const struct ib_cm_event *event)
2519 {
2520 struct srp_rdma_ch *ch = cm_id->context;
2521 struct srp_target_port *target = ch->target;
2522 int comp = 0;
2523
2524 switch (event->event) {
2525 case IB_CM_REQ_ERROR:
2526 shost_printk(KERN_DEBUG, target->scsi_host,
2527 PFX "Sending CM REQ failed\n");
2528 comp = 1;
2529 ch->status = -ECONNRESET;
2530 break;
2531
2532 case IB_CM_REP_RECEIVED:
2533 comp = 1;
2534 srp_cm_rep_handler(cm_id, event->private_data, ch);
2535 break;
2536
2537 case IB_CM_REJ_RECEIVED:
2538 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2539 comp = 1;
2540
2541 srp_ib_cm_rej_handler(cm_id, event, ch);
2542 break;
2543
2544 case IB_CM_DREQ_RECEIVED:
2545 shost_printk(KERN_WARNING, target->scsi_host,
2546 PFX "DREQ received - connection closed\n");
2547 ch->connected = false;
2548 if (ib_send_cm_drep(cm_id, NULL, 0))
2549 shost_printk(KERN_ERR, target->scsi_host,
2550 PFX "Sending CM DREP failed\n");
2551 queue_work(system_long_wq, &target->tl_err_work);
2552 break;
2553
2554 case IB_CM_TIMEWAIT_EXIT:
2555 shost_printk(KERN_ERR, target->scsi_host,
2556 PFX "connection closed\n");
2557 comp = 1;
2558
2559 ch->status = 0;
2560 break;
2561
2562 case IB_CM_MRA_RECEIVED:
2563 case IB_CM_DREQ_ERROR:
2564 case IB_CM_DREP_RECEIVED:
2565 break;
2566
2567 default:
2568 shost_printk(KERN_WARNING, target->scsi_host,
2569 PFX "Unhandled CM event %d\n", event->event);
2570 break;
2571 }
2572
2573 if (comp)
2574 complete(&ch->done);
2575
2576 return 0;
2577 }
2578
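/*
 * RDMA/CM counterpart of srp_ib_cm_rej_handler(); here the reject
 * reason is carried in event->status instead of in an IB CM REJ event.
 */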
2579 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2580 struct rdma_cm_event *event)
2581 {
2582 struct srp_target_port *target = ch->target;
2583 struct Scsi_Host *shost = target->scsi_host;
2584 int opcode;
2585
2586 switch (event->status) {
2587 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2588 shost_printk(KERN_WARNING, shost,
2589 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2590 ch->status = -ECONNRESET;
2591 break;
2592
2593 case IB_CM_REJ_CONSUMER_DEFINED:
2594 opcode = *(u8 *) event->param.conn.private_data;
2595 if (opcode == SRP_LOGIN_REJ) {
2596 struct srp_login_rej *rej =
2597 (struct srp_login_rej *)
2598 event->param.conn.private_data;
2599 u32 reason = be32_to_cpu(rej->reason);
2600
2601 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2602 shost_printk(KERN_WARNING, shost,
2603 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2604 else
2605 shost_printk(KERN_WARNING, shost,
2606 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2607 } else {
2608 shost_printk(KERN_WARNING, shost,
2609 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2610 opcode);
2611 }
2612 ch->status = -ECONNRESET;
2613 break;
2614
2615 case IB_CM_REJ_STALE_CONN:
2616 shost_printk(KERN_WARNING, shost,
2617 " REJ reason: stale connection\n");
2618 ch->status = SRP_STALE_CONN;
2619 break;
2620
2621 default:
2622 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2623 event->status);
2624 ch->status = -ECONNRESET;
2625 break;
2626 }
2627 }
2628
2629 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2630 struct rdma_cm_event *event)
2631 {
2632 struct srp_rdma_ch *ch = cm_id->context;
2633 struct srp_target_port *target = ch->target;
2634 int comp = 0;
2635
2636 switch (event->event) {
2637 case RDMA_CM_EVENT_ADDR_RESOLVED:
2638 ch->status = 0;
2639 comp = 1;
2640 break;
2641
2642 case RDMA_CM_EVENT_ADDR_ERROR:
2643 ch->status = -ENXIO;
2644 comp = 1;
2645 break;
2646
2647 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2648 ch->status = 0;
2649 comp = 1;
2650 break;
2651
2652 case RDMA_CM_EVENT_ROUTE_ERROR:
2653 case RDMA_CM_EVENT_UNREACHABLE:
2654 ch->status = -EHOSTUNREACH;
2655 comp = 1;
2656 break;
2657
2658 case RDMA_CM_EVENT_CONNECT_ERROR:
2659 shost_printk(KERN_DEBUG, target->scsi_host,
2660 PFX "Sending CM REQ failed\n");
2661 comp = 1;
2662 ch->status = -ECONNRESET;
2663 break;
2664
2665 case RDMA_CM_EVENT_ESTABLISHED:
2666 comp = 1;
2667 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2668 break;
2669
2670 case RDMA_CM_EVENT_REJECTED:
2671 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2672 comp = 1;
2673
2674 srp_rdma_cm_rej_handler(ch, event);
2675 break;
2676
2677 case RDMA_CM_EVENT_DISCONNECTED:
2678 if (ch->connected) {
2679 shost_printk(KERN_WARNING, target->scsi_host,
2680 PFX "received DREQ\n");
2681 rdma_disconnect(ch->rdma_cm.cm_id);
2682 comp = 1;
2683 ch->status = 0;
2684 queue_work(system_long_wq, &target->tl_err_work);
2685 }
2686 break;
2687
2688 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2689 shost_printk(KERN_ERR, target->scsi_host,
2690 PFX "connection closed\n");
2691
2692 comp = 1;
2693 ch->status = 0;
2694 break;
2695
2696 default:
2697 shost_printk(KERN_WARNING, target->scsi_host,
2698 PFX "Unhandled CM event %d\n", event->event);
2699 break;
2700 }
2701
2702 if (comp)
2703 complete(&ch->done);
2704
2705 return 0;
2706 }
2707
2708 /**
2709  * srp_change_queue_depth - setting device queue depth
2710  * @sdev: scsi device struct
2711  * @qdepth: requested queue depth
2712  *
2713  * Returns queue depth.
2714  */
2715 static int
2716 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2717 {
2718 if (!sdev->tagged_supported)
2719 qdepth = 1;
2720 return scsi_change_queue_depth(sdev, qdepth);
2721 }
2722
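/*
 * Send an SRP task management function and wait up to
 * SRP_ABORT_TIMEOUT_MS for the response. Returns 0 on success and -1 on
 * failure or timeout; *status is only set when a response has arrived.
 */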
2723 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2724 u8 func, u8 *status)
2725 {
2726 struct srp_target_port *target = ch->target;
2727 struct srp_rport *rport = target->rport;
2728 struct ib_device *dev = target->srp_host->srp_dev->dev;
2729 struct srp_iu *iu;
2730 struct srp_tsk_mgmt *tsk_mgmt;
2731 int res;
2732
2733 if (!ch->connected || target->qp_in_error)
2734 return -1;
2735
2736 /*
2737  * Lock the rport mutex to avoid that srp_create_ch_ib() is
2738  * invoked while a task management function is being sent.
2739  */
2740 mutex_lock(&rport->mutex);
2741 spin_lock_irq(&ch->lock);
2742 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2743 spin_unlock_irq(&ch->lock);
2744
2745 if (!iu) {
2746 mutex_unlock(&rport->mutex);
2747
2748 return -1;
2749 }
2750
2751 iu->num_sge = 1;
2752
2753 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2754 DMA_TO_DEVICE);
2755 tsk_mgmt = iu->buf;
2756 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2757
2758 tsk_mgmt->opcode = SRP_TSK_MGMT;
2759 int_to_scsilun(lun, &tsk_mgmt->lun);
2760 tsk_mgmt->tsk_mgmt_func = func;
2761 tsk_mgmt->task_tag = req_tag;
2762
2763 spin_lock_irq(&ch->lock);
2764 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2765 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2766 spin_unlock_irq(&ch->lock);
2767
2768 init_completion(&ch->tsk_mgmt_done);
2769
2770 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2771 DMA_TO_DEVICE);
2772 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2773 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2774 mutex_unlock(&rport->mutex);
2775
2776 return -1;
2777 }
2778 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2779 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2780 if (res > 0 && status)
2781 *status = ch->tsk_mgmt_status;
2782 mutex_unlock(&rport->mutex);
2783
2784 WARN_ON_ONCE(res < 0);
2785
2786 return res > 0 ? 0 : -1;
2787 }
2788
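/*
 * SCSI EH abort handler: claim the request so that the regular
 * completion path cannot finish it concurrently, then send
 * SRP_TSK_ABORT_TASK for its tag.
 */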
2789 static int srp_abort(struct scsi_cmnd *scmnd)
2790 {
2791 struct srp_target_port *target = host_to_target(scmnd->device->host);
2792 struct srp_request *req = scsi_cmd_priv(scmnd);
2793 u32 tag;
2794 u16 ch_idx;
2795 struct srp_rdma_ch *ch;
2796 int ret;
2797
2798 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2799
2800 if (!req)
2801 return SUCCESS;
2802 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2803 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2804 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2805 return SUCCESS;
2806 ch = &target->ch[ch_idx];
2807 if (!srp_claim_req(ch, req, NULL, scmnd))
2808 return SUCCESS;
2809 shost_printk(KERN_ERR, target->scsi_host,
2810 "Sending SRP abort for tag %#x\n", tag);
2811 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2812 SRP_TSK_ABORT_TASK, NULL) == 0)
2813 ret = SUCCESS;
2814 else if (target->rport->state == SRP_RPORT_LOST)
2815 ret = FAST_IO_FAIL;
2816 else
2817 ret = FAILED;
2818 if (ret == SUCCESS) {
2819 srp_free_req(ch, req, scmnd, 0);
2820 scmnd->result = DID_ABORT << 16;
2821 scsi_done(scmnd);
2822 }
2823
2824 return ret;
2825 }
2826
2827 static int srp_reset_device(struct scsi_cmnd *scmnd)
2828 {
2829 struct srp_target_port *target = host_to_target(scmnd->device->host);
2830 struct srp_rdma_ch *ch;
2831 u8 status;
2832
2833 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2834
2835 ch = &target->ch[0];
2836 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2837 SRP_TSK_LUN_RESET, &status))
2838 return FAILED;
2839 if (status)
2840 return FAILED;
2841
2842 return SUCCESS;
2843 }
2844
2845 static int srp_reset_host(struct scsi_cmnd *scmnd)
2846 {
2847 struct srp_target_port *target = host_to_target(scmnd->device->host);
2848
2849 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2850
2851 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2852 }
2853
2854 static int srp_target_alloc(struct scsi_target *starget)
2855 {
2856 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2857 struct srp_target_port *target = host_to_target(shost);
2858
2859 if (target->target_can_queue)
2860 starget->can_queue = target->target_can_queue;
2861 return 0;
2862 }
2863
2864 static int srp_slave_configure(struct scsi_device *sdev)
2865 {
2866 struct Scsi_Host *shost = sdev->host;
2867 struct srp_target_port *target = host_to_target(shost);
2868 struct request_queue *q = sdev->request_queue;
2869 unsigned long timeout;
2870
2871 if (sdev->type == TYPE_DISK) {
2872 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2873 blk_queue_rq_timeout(q, timeout);
2874 }
2875
2876 return 0;
2877 }
2878
2879 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2880 char *buf)
2881 {
2882 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2883
2884 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2885 }
2886
2887 static DEVICE_ATTR_RO(id_ext);
2888
2889 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2890 char *buf)
2891 {
2892 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2893
2894 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2895 }
2896
2897 static DEVICE_ATTR_RO(ioc_guid);
2898
2899 static ssize_t service_id_show(struct device *dev,
2900 struct device_attribute *attr, char *buf)
2901 {
2902 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2903
2904 if (target->using_rdma_cm)
2905 return -ENOENT;
2906 return sysfs_emit(buf, "0x%016llx\n",
2907 be64_to_cpu(target->ib_cm.service_id));
2908 }
2909
2910 static DEVICE_ATTR_RO(service_id);
2911
2912 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2913 char *buf)
2914 {
2915 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2916
2917 if (target->using_rdma_cm)
2918 return -ENOENT;
2919
2920 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2921 }
2922
2923 static DEVICE_ATTR_RO(pkey);
2924
2925 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2926 char *buf)
2927 {
2928 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2929
2930 return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2931 }
2932
2933 static DEVICE_ATTR_RO(sgid);
2934
2935 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2936 char *buf)
2937 {
2938 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2939 struct srp_rdma_ch *ch = &target->ch[0];
2940
2941 if (target->using_rdma_cm)
2942 return -ENOENT;
2943
2944 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2945 }
2946
2947 static DEVICE_ATTR_RO(dgid);
2948
2949 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2950 char *buf)
2951 {
2952 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2953
2954 if (target->using_rdma_cm)
2955 return -ENOENT;
2956
2957 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2958 }
2959
2960 static DEVICE_ATTR_RO(orig_dgid);
2961
2962 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2963 char *buf)
2964 {
2965 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2966 struct srp_rdma_ch *ch;
2967 int i, req_lim = INT_MAX;
2968
2969 for (i = 0; i < target->ch_count; i++) {
2970 ch = &target->ch[i];
2971 req_lim = min(req_lim, ch->req_lim);
2972 }
2973
2974 return sysfs_emit(buf, "%d\n", req_lim);
2975 }
2976
2977 static DEVICE_ATTR_RO(req_lim);
2978
2979 static ssize_t zero_req_lim_show(struct device *dev,
2980 struct device_attribute *attr, char *buf)
2981 {
2982 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2983
2984 return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2985 }
2986
2987 static DEVICE_ATTR_RO(zero_req_lim);
2988
2989 static ssize_t local_ib_port_show(struct device *dev,
2990 struct device_attribute *attr, char *buf)
2991 {
2992 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2993
2994 return sysfs_emit(buf, "%d\n", target->srp_host->port);
2995 }
2996
2997 static DEVICE_ATTR_RO(local_ib_port);
2998
2999 static ssize_t local_ib_device_show(struct device *dev,
3000 struct device_attribute *attr, char *buf)
3001 {
3002 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3003
3004 return sysfs_emit(buf, "%s\n",
3005 dev_name(&target->srp_host->srp_dev->dev->dev));
3006 }
3007
3008 static DEVICE_ATTR_RO(local_ib_device);
3009
3010 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
3011 char *buf)
3012 {
3013 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3014
3015 return sysfs_emit(buf, "%d\n", target->ch_count);
3016 }
3017
3018 static DEVICE_ATTR_RO(ch_count);
3019
3020 static ssize_t comp_vector_show(struct device *dev,
3021 struct device_attribute *attr, char *buf)
3022 {
3023 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3024
3025 return sysfs_emit(buf, "%d\n", target->comp_vector);
3026 }
3027
3028 static DEVICE_ATTR_RO(comp_vector);
3029
3030 static ssize_t tl_retry_count_show(struct device *dev,
3031 struct device_attribute *attr, char *buf)
3032 {
3033 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3034
3035 return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3036 }
3037
3038 static DEVICE_ATTR_RO(tl_retry_count);
3039
3040 static ssize_t cmd_sg_entries_show(struct device *dev,
3041 struct device_attribute *attr, char *buf)
3042 {
3043 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3044
3045 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3046 }
3047
3048 static DEVICE_ATTR_RO(cmd_sg_entries);
3049
3050 static ssize_t allow_ext_sg_show(struct device *dev,
3051 struct device_attribute *attr, char *buf)
3052 {
3053 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3054
3055 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3056 }
3057
3058 static DEVICE_ATTR_RO(allow_ext_sg);
3059
3060 static struct attribute *srp_host_attrs[] = {
3061 &dev_attr_id_ext.attr,
3062 &dev_attr_ioc_guid.attr,
3063 &dev_attr_service_id.attr,
3064 &dev_attr_pkey.attr,
3065 &dev_attr_sgid.attr,
3066 &dev_attr_dgid.attr,
3067 &dev_attr_orig_dgid.attr,
3068 &dev_attr_req_lim.attr,
3069 &dev_attr_zero_req_lim.attr,
3070 &dev_attr_local_ib_port.attr,
3071 &dev_attr_local_ib_device.attr,
3072 &dev_attr_ch_count.attr,
3073 &dev_attr_comp_vector.attr,
3074 &dev_attr_tl_retry_count.attr,
3075 &dev_attr_cmd_sg_entries.attr,
3076 &dev_attr_allow_ext_sg.attr,
3077 NULL
3078 };
3079
3080 ATTRIBUTE_GROUPS(srp_host);
3081
3082 static struct scsi_host_template srp_template = {
3083 .module = THIS_MODULE,
3084 .name = "InfiniBand SRP initiator",
3085 .proc_name = DRV_NAME,
3086 .target_alloc = srp_target_alloc,
3087 .slave_configure = srp_slave_configure,
3088 .info = srp_target_info,
3089 .init_cmd_priv = srp_init_cmd_priv,
3090 .exit_cmd_priv = srp_exit_cmd_priv,
3091 .queuecommand = srp_queuecommand,
3092 .change_queue_depth = srp_change_queue_depth,
3093 .eh_timed_out = srp_timed_out,
3094 .eh_abort_handler = srp_abort,
3095 .eh_device_reset_handler = srp_reset_device,
3096 .eh_host_reset_handler = srp_reset_host,
3097 .skip_settle_delay = true,
3098 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
3099 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
3100 .this_id = -1,
3101 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
3102 .shost_groups = srp_host_groups,
3103 .track_queue_depth = 1,
3104 .cmd_size = sizeof(struct srp_request),
3105 };
3106
3107 static int srp_sdev_count(struct Scsi_Host *host)
3108 {
3109 struct scsi_device *sdev;
3110 int c = 0;
3111
3112 shost_for_each_device(sdev, host)
3113 c++;
3114
3115 return c;
3116 }
3117
3118 /*
3119  * Return values:
3120  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3121  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3122  *    removal has been scheduled.
3123  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3124  */
3125 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3126 {
3127 struct srp_rport_identifiers ids;
3128 struct srp_rport *rport;
3129
3130 target->state = SRP_TARGET_SCANNING;
3131 sprintf(target->target_name, "SRP.T10:%016llX",
3132 be64_to_cpu(target->id_ext));
3133
3134 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3135 return -ENODEV;
3136
3137 memcpy(ids.port_id, &target->id_ext, 8);
3138 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3139 ids.roles = SRP_RPORT_ROLE_TARGET;
3140 rport = srp_rport_add(target->scsi_host, &ids);
3141 if (IS_ERR(rport)) {
3142 scsi_remove_host(target->scsi_host);
3143 return PTR_ERR(rport);
3144 }
3145
3146 rport->lld_data = target;
3147 target->rport = rport;
3148
3149 spin_lock(&host->target_lock);
3150 list_add_tail(&target->list, &host->target_list);
3151 spin_unlock(&host->target_lock);
3152
3153 scsi_scan_target(&target->scsi_host->shost_gendev,
3154 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3155
3156 if (srp_connected_ch(target) < target->ch_count ||
3157 target->qp_in_error) {
3158 shost_printk(KERN_INFO, target->scsi_host,
3159 PFX "SCSI scan failed - removing SCSI host\n");
3160 srp_queue_remove_work(target);
3161 goto out;
3162 }
3163
3164 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3165 dev_name(&target->scsi_host->shost_gendev),
3166 srp_sdev_count(target->scsi_host));
3167
3168 spin_lock_irq(&target->lock);
3169 if (target->state == SRP_TARGET_SCANNING)
3170 target->state = SRP_TARGET_LIVE;
3171 spin_unlock_irq(&target->lock);
3172
3173 out:
3174 return 0;
3175 }
3176
3177 static void srp_release_dev(struct device *dev)
3178 {
3179 struct srp_host *host =
3180 container_of(dev, struct srp_host, dev);
3181
3182 complete(&host->released);
3183 }
3184
3185 static struct class srp_class = {
3186 .name = "infiniband_srp",
3187 .dev_release = srp_release_dev
3188 };
3189
3190 /**
3191  * srp_conn_unique() - check whether the connection to a target is unique
3192  * @host:   SRP host.
3193  * @target: SRP target port.
3194  */
3195 static bool srp_conn_unique(struct srp_host *host,
3196 struct srp_target_port *target)
3197 {
3198 struct srp_target_port *t;
3199 bool ret = false;
3200
3201 if (target->state == SRP_TARGET_REMOVED)
3202 goto out;
3203
3204 ret = true;
3205
3206 spin_lock(&host->target_lock);
3207 list_for_each_entry(t, &host->target_list, list) {
3208 if (t != target &&
3209 target->id_ext == t->id_ext &&
3210 target->ioc_guid == t->ioc_guid &&
3211 target->initiator_ext == t->initiator_ext) {
3212 ret = false;
3213 break;
3214 }
3215 }
3216 spin_unlock(&host->target_lock);
3217
3218 out:
3219 return ret;
3220 }
3221
3222 /*
3223  * Target ports are added by writing
3224  *
3225  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3226  *     pkey=<P_Key>,service_id=<service ID>
3227  * or
3228  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3229  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3230  *
3231  * to the add_target sysfs attribute.
3232  */
3233 enum {
3234 SRP_OPT_ERR = 0,
3235 SRP_OPT_ID_EXT = 1 << 0,
3236 SRP_OPT_IOC_GUID = 1 << 1,
3237 SRP_OPT_DGID = 1 << 2,
3238 SRP_OPT_PKEY = 1 << 3,
3239 SRP_OPT_SERVICE_ID = 1 << 4,
3240 SRP_OPT_MAX_SECT = 1 << 5,
3241 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3242 SRP_OPT_IO_CLASS = 1 << 7,
3243 SRP_OPT_INITIATOR_EXT = 1 << 8,
3244 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3245 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3246 SRP_OPT_SG_TABLESIZE = 1 << 11,
3247 SRP_OPT_COMP_VECTOR = 1 << 12,
3248 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3249 SRP_OPT_QUEUE_SIZE = 1 << 14,
3250 SRP_OPT_IP_SRC = 1 << 15,
3251 SRP_OPT_IP_DEST = 1 << 16,
3252 SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
3253 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
3254 SRP_OPT_CH_COUNT = 1 << 19,
3255 };
3256
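/*
 * An add_target request must contain all options of at least one of the
 * two sets below: the first set describes an IB CM login and the second
 * set an RDMA CM login.
 */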
3257 static unsigned int srp_opt_mandatory[] = {
3258 SRP_OPT_ID_EXT |
3259 SRP_OPT_IOC_GUID |
3260 SRP_OPT_DGID |
3261 SRP_OPT_PKEY |
3262 SRP_OPT_SERVICE_ID,
3263 SRP_OPT_ID_EXT |
3264 SRP_OPT_IOC_GUID |
3265 SRP_OPT_IP_DEST,
3266 };
3267
3268 static const match_table_t srp_opt_tokens = {
3269 { SRP_OPT_ID_EXT, "id_ext=%s" },
3270 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3271 { SRP_OPT_DGID, "dgid=%s" },
3272 { SRP_OPT_PKEY, "pkey=%x" },
3273 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3274 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3275 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3276 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
3277 { SRP_OPT_IO_CLASS, "io_class=%x" },
3278 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3279 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3280 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3281 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3282 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3283 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3284 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3285 { SRP_OPT_IP_SRC, "src=%s" },
3286 { SRP_OPT_IP_DEST, "dest=%s" },
3287 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
3288 { SRP_OPT_CH_COUNT, "ch_count=%u" },
3289 { SRP_OPT_ERR, NULL }
3290 };
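/*
 * Example add_target write for the IB CM case; the identifiers below
 * are made up and the sysfs directory name depends on the local HCA:
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=00a0b80200402bd4,\
 *   dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *   service_id=0002c90200402bd4" \
 *       > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 */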
3291
3292 /**
3293  * srp_parse_in - parse an IP address and port number combination
3294  * @net: [in] Network namespace.
3295  * @sa: [out] Address family, IP address and port number.
3296  * @addr_port_str: [in] IP address and port number.
3297  * @has_port: [out] Whether or not @addr_port_str includes a port number.
3298  *
3299  * Parse the following address formats:
3300  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3301  * - IPv6: [<ipv6_address>]:<port>, e.g. [1::2:3%4]:5.
3302  */
3303 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3304 const char *addr_port_str, bool *has_port)
3305 {
3306 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3307 char *port_str;
3308 int ret;
3309
3310 if (!addr)
3311 return -ENOMEM;
3312 port_str = strrchr(addr, ':');
3313 if (port_str && strchr(port_str, ']'))
3314 port_str = NULL;
3315 if (port_str)
3316 *port_str++ = '\0';
3317 if (has_port)
3318 *has_port = port_str != NULL;
3319 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3320 if (ret && addr[0]) {
3321 addr_end = addr + strlen(addr) - 1;
3322 if (addr[0] == '[' && *addr_end == ']') {
3323 *addr_end = '\0';
3324 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3325 port_str, sa);
3326 }
3327 }
3328 kfree(addr);
3329 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3330 return ret;
3331 }
3332
3333 static int srp_parse_options(struct net *net, const char *buf,
3334 struct srp_target_port *target)
3335 {
3336 char *options, *sep_opt;
3337 char *p;
3338 substring_t args[MAX_OPT_ARGS];
3339 unsigned long long ull;
3340 bool has_port;
3341 int opt_mask = 0;
3342 int token;
3343 int ret = -EINVAL;
3344 int i;
3345
3346 options = kstrdup(buf, GFP_KERNEL);
3347 if (!options)
3348 return -ENOMEM;
3349
3350 sep_opt = options;
3351 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3352 if (!*p)
3353 continue;
3354
3355 token = match_token(p, srp_opt_tokens, args);
3356 opt_mask |= token;
3357
3358 switch (token) {
3359 case SRP_OPT_ID_EXT:
3360 p = match_strdup(args);
3361 if (!p) {
3362 ret = -ENOMEM;
3363 goto out;
3364 }
3365 ret = kstrtoull(p, 16, &ull);
3366 if (ret) {
3367 pr_warn("invalid id_ext parameter '%s'\n", p);
3368 kfree(p);
3369 goto out;
3370 }
3371 target->id_ext = cpu_to_be64(ull);
3372 kfree(p);
3373 break;
3374
3375 case SRP_OPT_IOC_GUID:
3376 p = match_strdup(args);
3377 if (!p) {
3378 ret = -ENOMEM;
3379 goto out;
3380 }
3381 ret = kstrtoull(p, 16, &ull);
3382 if (ret) {
3383 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3384 kfree(p);
3385 goto out;
3386 }
3387 target->ioc_guid = cpu_to_be64(ull);
3388 kfree(p);
3389 break;
3390
3391 case SRP_OPT_DGID:
3392 p = match_strdup(args);
3393 if (!p) {
3394 ret = -ENOMEM;
3395 goto out;
3396 }
3397 if (strlen(p) != 32) {
3398 pr_warn("bad dest GID parameter '%s'\n", p);
3399 kfree(p);
3400 goto out;
3401 }
3402
3403 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3404 kfree(p);
3405 if (ret < 0)
3406 goto out;
3407 break;
3408
3409 case SRP_OPT_PKEY:
3410 if (match_hex(args, &token)) {
3411 pr_warn("bad P_Key parameter '%s'\n", p);
3412 goto out;
3413 }
3414 target->ib_cm.pkey = cpu_to_be16(token);
3415 break;
3416
3417 case SRP_OPT_SERVICE_ID:
3418 p = match_strdup(args);
3419 if (!p) {
3420 ret = -ENOMEM;
3421 goto out;
3422 }
3423 ret = kstrtoull(p, 16, &ull);
3424 if (ret) {
3425 pr_warn("bad service_id parameter '%s'\n", p);
3426 kfree(p);
3427 goto out;
3428 }
3429 target->ib_cm.service_id = cpu_to_be64(ull);
3430 kfree(p);
3431 break;
3432
3433 case SRP_OPT_IP_SRC:
3434 p = match_strdup(args);
3435 if (!p) {
3436 ret = -ENOMEM;
3437 goto out;
3438 }
3439 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3440 NULL);
3441 if (ret < 0) {
3442 pr_warn("bad source parameter '%s'\n", p);
3443 kfree(p);
3444 goto out;
3445 }
3446 target->rdma_cm.src_specified = true;
3447 kfree(p);
3448 break;
3449
3450 case SRP_OPT_IP_DEST:
3451 p = match_strdup(args);
3452 if (!p) {
3453 ret = -ENOMEM;
3454 goto out;
3455 }
3456 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3457 &has_port);
3458 if (!has_port)
3459 ret = -EINVAL;
3460 if (ret < 0) {
3461 pr_warn("bad dest parameter '%s'\n", p);
3462 kfree(p);
3463 goto out;
3464 }
3465 target->using_rdma_cm = true;
3466 kfree(p);
3467 break;
3468
3469 case SRP_OPT_MAX_SECT:
3470 if (match_int(args, &token)) {
3471 pr_warn("bad max sect parameter '%s'\n", p);
3472 goto out;
3473 }
3474 target->scsi_host->max_sectors = token;
3475 break;
3476
3477 case SRP_OPT_QUEUE_SIZE:
3478 if (match_int(args, &token) || token < 1) {
3479 pr_warn("bad queue_size parameter '%s'\n", p);
3480 goto out;
3481 }
3482 target->scsi_host->can_queue = token;
3483 target->queue_size = token + SRP_RSP_SQ_SIZE +
3484 SRP_TSK_MGMT_SQ_SIZE;
3485 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3486 target->scsi_host->cmd_per_lun = token;
3487 break;
3488
3489 case SRP_OPT_MAX_CMD_PER_LUN:
3490 if (match_int(args, &token) || token < 1) {
3491 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3492 p);
3493 goto out;
3494 }
3495 target->scsi_host->cmd_per_lun = token;
3496 break;
3497
3498 case SRP_OPT_TARGET_CAN_QUEUE:
3499 if (match_int(args, &token) || token < 1) {
3500 pr_warn("bad max target_can_queue parameter '%s'\n",
3501 p);
3502 goto out;
3503 }
3504 target->target_can_queue = token;
3505 break;
3506
3507 case SRP_OPT_IO_CLASS:
3508 if (match_hex(args, &token)) {
3509 pr_warn("bad IO class parameter '%s'\n", p);
3510 goto out;
3511 }
3512 if (token != SRP_REV10_IB_IO_CLASS &&
3513 token != SRP_REV16A_IB_IO_CLASS) {
3514 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3515 token, SRP_REV10_IB_IO_CLASS,
3516 SRP_REV16A_IB_IO_CLASS);
3517 goto out;
3518 }
3519 target->io_class = token;
3520 break;
3521
3522 case SRP_OPT_INITIATOR_EXT:
3523 p = match_strdup(args);
3524 if (!p) {
3525 ret = -ENOMEM;
3526 goto out;
3527 }
3528 ret = kstrtoull(p, 16, &ull);
3529 if (ret) {
3530 pr_warn("bad initiator_ext value '%s'\n", p);
3531 kfree(p);
3532 goto out;
3533 }
3534 target->initiator_ext = cpu_to_be64(ull);
3535 kfree(p);
3536 break;
3537
3538 case SRP_OPT_CMD_SG_ENTRIES:
3539 if (match_int(args, &token) || token < 1 || token > 255) {
3540 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3541 p);
3542 goto out;
3543 }
3544 target->cmd_sg_cnt = token;
3545 break;
3546
3547 case SRP_OPT_ALLOW_EXT_SG:
3548 if (match_int(args, &token)) {
3549 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3550 goto out;
3551 }
3552 target->allow_ext_sg = !!token;
3553 break;
3554
3555 case SRP_OPT_SG_TABLESIZE:
3556 if (match_int(args, &token) || token < 1 ||
3557 token > SG_MAX_SEGMENTS) {
3558 pr_warn("bad max sg_tablesize parameter '%s'\n",
3559 p);
3560 goto out;
3561 }
3562 target->sg_tablesize = token;
3563 break;
3564
3565 case SRP_OPT_COMP_VECTOR:
3566 if (match_int(args, &token) || token < 0) {
3567 pr_warn("bad comp_vector parameter '%s'\n", p);
3568 goto out;
3569 }
3570 target->comp_vector = token;
3571 break;
3572
3573 case SRP_OPT_TL_RETRY_COUNT:
3574 if (match_int(args, &token) || token < 2 || token > 7) {
3575 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3576 p);
3577 goto out;
3578 }
3579 target->tl_retry_count = token;
3580 break;
3581
3582 case SRP_OPT_MAX_IT_IU_SIZE:
3583 if (match_int(args, &token) || token < 0) {
3584 pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3585 goto out;
3586 }
3587 target->max_it_iu_size = token;
3588 break;
3589
3590 case SRP_OPT_CH_COUNT:
3591 if (match_int(args, &token) || token < 1) {
3592 pr_warn("bad channel count %s\n", p);
3593 goto out;
3594 }
3595 target->ch_count = token;
3596 break;
3597
3598 default:
3599 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3600 p);
3601 goto out;
3602 }
3603 }
3604
3605 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3606 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3607 ret = 0;
3608 break;
3609 }
3610 }
3611 if (ret)
3612 pr_warn("target creation request is missing one or more parameters\n");
3613
3614 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3615 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3616 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3617 target->scsi_host->cmd_per_lun,
3618 target->scsi_host->can_queue);
3619
3620 out:
3621 kfree(options);
3622 return ret;
3623 }
3624
3625 static ssize_t add_target_store(struct device *dev,
3626 struct device_attribute *attr, const char *buf,
3627 size_t count)
3628 {
3629 struct srp_host *host =
3630 container_of(dev, struct srp_host, dev);
3631 struct Scsi_Host *target_host;
3632 struct srp_target_port *target;
3633 struct srp_rdma_ch *ch;
3634 struct srp_device *srp_dev = host->srp_dev;
3635 struct ib_device *ibdev = srp_dev->dev;
3636 int ret, i, ch_idx;
3637 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3638 bool multich = false;
3639 uint32_t max_iu_len;
3640
3641 target_host = scsi_host_alloc(&srp_template,
3642 sizeof (struct srp_target_port));
3643 if (!target_host)
3644 return -ENOMEM;
3645
3646 target_host->transportt = ib_srp_transport_template;
3647 target_host->max_channel = 0;
3648 target_host->max_id = 1;
3649 target_host->max_lun = -1LL;
3650 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3651 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3652
3653 if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
3654 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3655
3656 target = host_to_target(target_host);
3657
3658 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3659 target->io_class = SRP_REV16A_IB_IO_CLASS;
3660 target->scsi_host = target_host;
3661 target->srp_host = host;
3662 target->lkey = host->srp_dev->pd->local_dma_lkey;
3663 target->global_rkey = host->srp_dev->global_rkey;
3664 target->cmd_sg_cnt = cmd_sg_entries;
3665 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3666 target->allow_ext_sg = allow_ext_sg;
3667 target->tl_retry_count = 7;
3668 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3669
3670 /*
3671  * Avoid that the SCSI host can be removed by srp_remove_target()
3672  * before this function returns.
3673  */
3674 scsi_host_get(target->scsi_host);
3675
3676 ret = mutex_lock_interruptible(&host->add_target_mutex);
3677 if (ret < 0)
3678 goto put;
3679
3680 ret = srp_parse_options(target->net, buf, target);
3681 if (ret)
3682 goto out;
3683
3684 if (!srp_conn_unique(target->srp_host, target)) {
3685 if (target->using_rdma_cm) {
3686 shost_printk(KERN_INFO, target->scsi_host,
3687 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3688 be64_to_cpu(target->id_ext),
3689 be64_to_cpu(target->ioc_guid),
3690 &target->rdma_cm.dst);
3691 } else {
3692 shost_printk(KERN_INFO, target->scsi_host,
3693 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3694 be64_to_cpu(target->id_ext),
3695 be64_to_cpu(target->ioc_guid),
3696 be64_to_cpu(target->initiator_ext));
3697 }
3698 ret = -EEXIST;
3699 goto out;
3700 }
3701
3702 if (!srp_dev->has_fr && !target->allow_ext_sg &&
3703 target->cmd_sg_cnt < target->sg_tablesize) {
3704 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3705 target->sg_tablesize = target->cmd_sg_cnt;
3706 }
3707
3708 if (srp_dev->use_fast_reg) {
3709 bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3710 IBK_SG_GAPS_REG;
3711
3712 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3713 (ilog2(srp_dev->mr_page_size) - 9);
3714 if (!gaps_reg) {
3715 /*
3716  * FR can only map one HCA page per entry. If the start
3717  * address is not aligned on a HCA page boundary two
3718  * entries will be used for the head and the tail
3719  * although these two entries combined contain at most
3720  * one HCA page of data. Hence the "+ 1" in the
3721  * calculation below.
3722  *
3723  * The indirect data buffer descriptor is contiguous
3724  * so the memory for that buffer will only be
3725  * registered if register_always is true. Hence add
3726  * one to mr_per_cmd if register_always has been set.
3727  */
3728 mr_per_cmd = register_always +
3729 (target->scsi_host->max_sectors + 1 +
3730 max_sectors_per_mr - 1) / max_sectors_per_mr;
3731 } else {
3732 mr_per_cmd = register_always +
3733 (target->sg_tablesize +
3734 srp_dev->max_pages_per_mr - 1) /
3735 srp_dev->max_pages_per_mr;
3736 }
3737 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3738 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3739 max_sectors_per_mr, mr_per_cmd);
3740 }
3741
3742 target_host->sg_tablesize = target->sg_tablesize;
3743 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3744 target->mr_per_cmd = mr_per_cmd;
3745 target->indirect_size = target->sg_tablesize *
3746 sizeof (struct srp_direct_buf);
3747 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3748 srp_use_imm_data,
3749 target->max_it_iu_size);
3750
3751 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3752 INIT_WORK(&target->remove_work, srp_remove_work);
3753 spin_lock_init(&target->lock);
3754 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3755 if (ret)
3756 goto out;
3757
3758 ret = -ENOMEM;
3759 if (target->ch_count == 0) {
3760 target->ch_count =
3761 min(ch_count ?:
3762 max(4 * num_online_nodes(),
3763 ibdev->num_comp_vectors),
3764 num_online_cpus());
3765 }
3766
3767 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3768 GFP_KERNEL);
3769 if (!target->ch)
3770 goto out;
3771
3772 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3773 ch = &target->ch[ch_idx];
3774 ch->target = target;
3775 ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3776 spin_lock_init(&ch->lock);
3777 INIT_LIST_HEAD(&ch->free_tx);
3778 ret = srp_new_cm_id(ch);
3779 if (ret)
3780 goto err_disconnect;
3781
3782 ret = srp_create_ch_ib(ch);
3783 if (ret)
3784 goto err_disconnect;
3785
3786 ret = srp_connect_ch(ch, max_iu_len, multich);
3787 if (ret) {
3788 char dst[64];
3789
3790 if (target->using_rdma_cm)
3791 snprintf(dst, sizeof(dst), "%pIS",
3792 &target->rdma_cm.dst);
3793 else
3794 snprintf(dst, sizeof(dst), "%pI6",
3795 target->ib_cm.orig_dgid.raw);
3796 shost_printk(KERN_ERR, target->scsi_host,
3797 PFX "Connection %d/%d to %s failed\n",
3798 ch_idx,
3799 target->ch_count, dst);
3800 if (ch_idx == 0) {
3801 goto free_ch;
3802 } else {
3803 srp_free_ch_ib(target, ch);
3804 target->ch_count = ch - target->ch;
3805 goto connected;
3806 }
3807 }
3808 multich = true;
3809 }
3810
3811 connected:
3812 target->scsi_host->nr_hw_queues = target->ch_count;
3813
3814 ret = srp_add_target(host, target);
3815 if (ret)
3816 goto err_disconnect;
3817
3818 if (target->state != SRP_TARGET_REMOVED) {
3819 if (target->using_rdma_cm) {
3820 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3821 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3822 be64_to_cpu(target->id_ext),
3823 be64_to_cpu(target->ioc_guid),
3824 target->sgid.raw, &target->rdma_cm.dst);
3825 } else {
3826 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3827 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3828 be64_to_cpu(target->id_ext),
3829 be64_to_cpu(target->ioc_guid),
3830 be16_to_cpu(target->ib_cm.pkey),
3831 be64_to_cpu(target->ib_cm.service_id),
3832 target->sgid.raw,
3833 target->ib_cm.orig_dgid.raw);
3834 }
3835 }
3836
3837 ret = count;
3838
3839 out:
3840 mutex_unlock(&host->add_target_mutex);
3841
3842 put:
3843 scsi_host_put(target->scsi_host);
3844 if (ret < 0) {
3845 /*
3846  * If a call to srp_remove_target() has not been scheduled,
3847  * drop the network namespace reference now that was obtained
3848  * earlier in this function.
3849  */
3850 if (target->state != SRP_TARGET_REMOVED)
3851 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3852 scsi_host_put(target->scsi_host);
3853 }
3854
3855 return ret;
3856
3857 err_disconnect:
3858 srp_disconnect_target(target);
3859
3860 free_ch:
3861 for (i = 0; i < target->ch_count; i++) {
3862 ch = &target->ch[i];
3863 srp_free_ch_ib(target, ch);
3864 }
3865
3866 kfree(target->ch);
3867 goto out;
3868 }
3869
3870 static DEVICE_ATTR_WO(add_target);
3871
3872 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3873 char *buf)
3874 {
3875 struct srp_host *host = container_of(dev, struct srp_host, dev);
3876
3877 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3878 }
3879
3880 static DEVICE_ATTR_RO(ibdev);
3881
3882 static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3883 char *buf)
3884 {
3885 struct srp_host *host = container_of(dev, struct srp_host, dev);
3886
3887 return sysfs_emit(buf, "%d\n", host->port);
3888 }
3889
3890 static DEVICE_ATTR_RO(port);
3891
3892 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3893 {
3894 struct srp_host *host;
3895
3896 host = kzalloc(sizeof *host, GFP_KERNEL);
3897 if (!host)
3898 return NULL;
3899
3900 INIT_LIST_HEAD(&host->target_list);
3901 spin_lock_init(&host->target_lock);
3902 init_completion(&host->released);
3903 mutex_init(&host->add_target_mutex);
3904 host->srp_dev = device;
3905 host->port = port;
3906
3907 host->dev.class = &srp_class;
3908 host->dev.parent = device->dev->dev.parent;
3909 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3910 port);
3911
3912 if (device_register(&host->dev))
3913 goto free_host;
3914 if (device_create_file(&host->dev, &dev_attr_add_target))
3915 goto err_class;
3916 if (device_create_file(&host->dev, &dev_attr_ibdev))
3917 goto err_class;
3918 if (device_create_file(&host->dev, &dev_attr_port))
3919 goto err_class;
3920
3921 return host;
3922
3923 err_class:
3924 device_unregister(&host->dev);
3925
3926 free_host:
3927 kfree(host);
3928
3929 return NULL;
3930 }
3931
3932 static void srp_rename_dev(struct ib_device *device, void *client_data)
3933 {
3934 struct srp_device *srp_dev = client_data;
3935 struct srp_host *host, *tmp_host;
3936
3937 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3938 char name[IB_DEVICE_NAME_MAX + 8];
3939
3940 snprintf(name, sizeof(name), "srp-%s-%d",
3941 dev_name(&device->dev), host->port);
3942 device_rename(&host->dev, name);
3943 }
3944 }
3945
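/*
 * Called by the IB core for every new RDMA device: derive the MR page
 * size and registration limits from the device attributes, allocate a
 * protection domain and register one srp_host per port.
 */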
3946 static int srp_add_one(struct ib_device *device)
3947 {
3948 struct srp_device *srp_dev;
3949 struct ib_device_attr *attr = &device->attrs;
3950 struct srp_host *host;
3951 int mr_page_shift;
3952 unsigned int p;
3953 u64 max_pages_per_mr;
3954 unsigned int flags = 0;
3955
3956 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3957 if (!srp_dev)
3958 return -ENOMEM;
3959
3960 /*
3961  * Use the smallest page size supported by the HCA, down to a
3962  * minimum of 4096 bytes. We're unlikely to build large sglists
3963  * out of smaller entries.
3964  */
3965 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3966 srp_dev->mr_page_size = 1 << mr_page_shift;
3967 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3968 max_pages_per_mr = attr->max_mr_size;
3969 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3970 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3971 attr->max_mr_size, srp_dev->mr_page_size,
3972 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3973 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3974 max_pages_per_mr);
3975
3976 srp_dev->has_fr = (attr->device_cap_flags &
3977 IB_DEVICE_MEM_MGT_EXTENSIONS);
3978 if (!never_register && !srp_dev->has_fr)
3979 dev_warn(&device->dev, "FR is not supported\n");
3980 else if (!never_register &&
3981 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3982 srp_dev->use_fast_reg = srp_dev->has_fr;
3983
3984 if (never_register || !register_always || !srp_dev->has_fr)
3985 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3986
3987 if (srp_dev->use_fast_reg) {
3988 srp_dev->max_pages_per_mr =
3989 min_t(u32, srp_dev->max_pages_per_mr,
3990 attr->max_fast_reg_page_list_len);
3991 }
3992 srp_dev->mr_max_size = srp_dev->mr_page_size *
3993 srp_dev->max_pages_per_mr;
3994 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3995 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
3996 attr->max_fast_reg_page_list_len,
3997 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3998
3999 INIT_LIST_HEAD(&srp_dev->dev_list);
4000
4001 srp_dev->dev = device;
4002 srp_dev->pd = ib_alloc_pd(device, flags);
4003 if (IS_ERR(srp_dev->pd)) {
4004 int ret = PTR_ERR(srp_dev->pd);
4005
4006 kfree(srp_dev);
4007 return ret;
4008 }
4009
4010 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4011 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4012 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4013 }
4014
4015 rdma_for_each_port (device, p) {
4016 host = srp_add_port(srp_dev, p);
4017 if (host)
4018 list_add_tail(&host->list, &srp_dev->dev_list);
4019 }
4020
4021 ib_set_client_data(device, &srp_client, srp_dev);
4022 return 0;
4023 }
4024
4025 static void srp_remove_one(struct ib_device *device, void *client_data)
4026 {
4027 struct srp_device *srp_dev;
4028 struct srp_host *host, *tmp_host;
4029 struct srp_target_port *target;
4030
4031 srp_dev = client_data;
4032
4033 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4034 device_unregister(&host->dev);
4035 /*
4036  * Wait for the sysfs entry to go away, so that no new
4037  * target ports can be created.
4038  */
4039 wait_for_completion(&host->released);
4040
4041 /*
4042  * Remove all target ports.
4043  */
4044 spin_lock(&host->target_lock);
4045 list_for_each_entry(target, &host->target_list, list)
4046 srp_queue_remove_work(target);
4047 spin_unlock(&host->target_lock);
4048
4049 /*
4050  * srp_queue_remove_work() queues a call to srp_remove_target().
4051  * The latter function cancels target->tl_err_work, so waiting
4052  * for the remove works to finish is sufficient to ensure that
4053  * no work is still running when the host is freed below.
4054  */
4055 flush_workqueue(srp_remove_wq);
4056
4057 kfree(host);
4058 }
4059
4060 ib_dealloc_pd(srp_dev->pd);
4061
4062 kfree(srp_dev);
4063 }
4064
4065 static struct srp_function_template ib_srp_transport_functions = {
4066 .has_rport_state = true,
4067 .reset_timer_if_blocked = true,
4068 .reconnect_delay = &srp_reconnect_delay,
4069 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4070 .dev_loss_tmo = &srp_dev_loss_tmo,
4071 .reconnect = srp_rport_reconnect,
4072 .rport_delete = srp_rport_delete,
4073 .terminate_rport_io = srp_terminate_io,
4074 };
4075
4076 static int __init srp_init_module(void)
4077 {
4078 int ret;
4079
4080 BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4081 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4082 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4083 BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4084 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4085 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4086 BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4087
4088 if (srp_sg_tablesize) {
4089 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4090 if (!cmd_sg_entries)
4091 cmd_sg_entries = srp_sg_tablesize;
4092 }
4093
4094 if (!cmd_sg_entries)
4095 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4096
4097 if (cmd_sg_entries > 255) {
4098 pr_warn("Clamping cmd_sg_entries to 255\n");
4099 cmd_sg_entries = 255;
4100 }
4101
4102 if (!indirect_sg_entries)
4103 indirect_sg_entries = cmd_sg_entries;
4104 else if (indirect_sg_entries < cmd_sg_entries) {
4105 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4106 cmd_sg_entries);
4107 indirect_sg_entries = cmd_sg_entries;
4108 }
4109
4110 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4111 pr_warn("Clamping indirect_sg_entries to %u\n",
4112 SG_MAX_SEGMENTS);
4113 indirect_sg_entries = SG_MAX_SEGMENTS;
4114 }
4115
4116 srp_remove_wq = create_workqueue("srp_remove");
4117 if (!srp_remove_wq) {
4118 ret = -ENOMEM;
4119 goto out;
4120 }
4121
4122 ret = -ENOMEM;
4123 ib_srp_transport_template =
4124 srp_attach_transport(&ib_srp_transport_functions);
4125 if (!ib_srp_transport_template)
4126 goto destroy_wq;
4127
4128 ret = class_register(&srp_class);
4129 if (ret) {
4130 pr_err("couldn't register class infiniband_srp\n");
4131 goto release_tr;
4132 }
4133
4134 ib_sa_register_client(&srp_sa_client);
4135
4136 ret = ib_register_client(&srp_client);
4137 if (ret) {
4138 pr_err("couldn't register IB client\n");
4139 goto unreg_sa;
4140 }
4141
4142 out:
4143 return ret;
4144
4145 unreg_sa:
4146 ib_sa_unregister_client(&srp_sa_client);
4147 class_unregister(&srp_class);
4148
4149 release_tr:
4150 srp_release_transport(ib_srp_transport_template);
4151
4152 destroy_wq:
4153 destroy_workqueue(srp_remove_wq);
4154 goto out;
4155 }
4156
4157 static void __exit srp_cleanup_module(void)
4158 {
4159 ib_unregister_client(&srp_client);
4160 ib_sa_unregister_client(&srp_sa_client);
4161 class_unregister(&srp_class);
4162 srp_release_transport(ib_srp_transport_template);
4163 destroy_workqueue(srp_remove_wq);
4164 }
4165
4166 module_init(srp_init_module);
4167 module_exit(srp_cleanup_module);