#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN     8
#define ISER_MAX_RX_LEN         (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN         (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN         (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
                                 ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
        iser_err("qp event %s (%d)\n",
                 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
                               struct ib_event *event)
{
        iser_err("async event %s (%d) on device %s port %d\n",
                 ib_event_msg(event->event), event->event,
                 dev_name(&event->device->dev), event->element.port_num);
}
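
/*
 * iser_create_device_ib_res - allocates the Protection Domain (PD) and
 * registers an asynchronous event handler for the IB device.
 *
 * Return: 0 on success, -1 on failure.
 */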
static int iser_create_device_ib_res(struct iser_device *device)
{
        struct ib_device *ib_dev = device->ib_device;

        if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
                iser_err("IB device does not support memory registrations\n");
                return -1;
        }

        device->pd = ib_alloc_pd(ib_dev,
                                 iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
        if (IS_ERR(device->pd))
                goto pd_err;

        INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
                              iser_event_handler);
        ib_register_event_handler(&device->event_handler);
        return 0;

pd_err:
        iser_err("failed to allocate an IB resource\n");
        return -1;
}
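
/*
 * iser_free_device_ib_res - unregisters the device's event handler and
 * releases its Protection Domain.
 */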
static void iser_free_device_ib_res(struct iser_device *device)
{
        ib_unregister_event_handler(&device->event_handler);
        ib_dealloc_pd(device->pd);

        device->pd = NULL;
}

static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
                         struct ib_pd *pd,
                         bool pi_enable,
                         unsigned int size)
{
        struct iser_fr_desc *desc;
        struct ib_device *ib_dev = device->ib_device;
        enum ib_mr_type mr_type;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return ERR_PTR(-ENOMEM);

        if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
                mr_type = IB_MR_TYPE_SG_GAPS;
        else
                mr_type = IB_MR_TYPE_MEM_REG;

        desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
        if (IS_ERR(desc->rsc.mr)) {
                ret = PTR_ERR(desc->rsc.mr);
                iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
                goto err_alloc_mr;
        }

        if (pi_enable) {
                desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
                if (IS_ERR(desc->rsc.sig_mr)) {
                        ret = PTR_ERR(desc->rsc.sig_mr);
                        iser_err("Failed to allocate sig_mr err=%d\n", ret);
                        goto err_alloc_mr_integrity;
                }
        }
        desc->rsc.mr_valid = 0;

        return desc;

err_alloc_mr_integrity:
        ib_dereg_mr(desc->rsc.mr);
err_alloc_mr:
        kfree(desc);

        return ERR_PTR(ret);
}

static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
{
        struct iser_reg_resources *res = &desc->rsc;

        ib_dereg_mr(res->mr);
        if (res->sig_mr) {
                ib_dereg_mr(res->sig_mr);
                res->sig_mr = NULL;
        }
        kfree(desc);
}
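
/**
 * iser_alloc_fastreg_pool - Creates a pool of fast registration descriptors
 * for fast registration work requests.
 * @ib_conn: connection RDMA resources
 * @cmds_max: max number of SCSI commands for this connection
 * @size: max number of pages per map request
 *
 * Return: 0 on success, or errno code on failure.
 */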
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
                            unsigned cmds_max,
                            unsigned int size)
{
        struct iser_device *device = ib_conn->device;
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        struct iser_fr_desc *desc;
        int i, ret;

        INIT_LIST_HEAD(&fr_pool->list);
        INIT_LIST_HEAD(&fr_pool->all_list);
        spin_lock_init(&fr_pool->lock);
        fr_pool->size = 0;
        for (i = 0; i < cmds_max; i++) {
                desc = iser_create_fastreg_desc(device, device->pd,
                                                ib_conn->pi_support, size);
                if (IS_ERR(desc)) {
                        ret = PTR_ERR(desc);
                        goto err;
                }

                list_add_tail(&desc->list, &fr_pool->list);
                list_add_tail(&desc->all_list, &fr_pool->all_list);
                fr_pool->size++;
        }

        return 0;

err:
        iser_free_fastreg_pool(ib_conn);
        return ret;
}
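
/**
 * iser_free_fastreg_pool - releases the pool of fast registration descriptors
 * @ib_conn: connection RDMA resources
 */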
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        struct iser_fr_desc *desc, *tmp;
        int i = 0;

        if (list_empty(&fr_pool->all_list))
                return;

        iser_info("freeing conn %p fr pool\n", ib_conn);

        list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
                list_del(&desc->all_list);
                iser_destroy_fastreg_desc(desc);
                ++i;
        }

        if (i < fr_pool->size)
                iser_warn("pool still has %d regions registered\n",
                          fr_pool->size - i);
}
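
/*
 * iser_create_ib_conn_res - gets a CQ from the completion queue pool and
 * creates the Queue-Pair (QP) for the connection.
 *
 * Return: 0 on success, errno code on failure.
 */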
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_device *device;
        struct ib_device *ib_dev;
        struct ib_qp_init_attr init_attr;
        int ret = -ENOMEM;
        unsigned int max_send_wr, cq_size;

        BUG_ON(ib_conn->device == NULL);

        device = ib_conn->device;
        ib_dev = device->ib_device;

        /* +1 send WR reserved for draining the QP */
        if (ib_conn->pi_support)
                max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
        else
                max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
        max_send_wr = min_t(unsigned int, max_send_wr,
                            (unsigned int)ib_dev->attrs.max_qp_wr);

        cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
        ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
        if (IS_ERR(ib_conn->cq)) {
                ret = PTR_ERR(ib_conn->cq);
                goto cq_err;
        }
        ib_conn->cq_size = cq_size;

        memset(&init_attr, 0, sizeof(init_attr));

        init_attr.event_handler = iser_qp_event_callback;
        init_attr.qp_context = (void *)ib_conn;
        init_attr.send_cq = ib_conn->cq;
        init_attr.recv_cq = ib_conn->cq;
        /* +1 recv WR reserved for draining the QP */
        init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
        init_attr.cap.max_send_sge = 2;
        init_attr.cap.max_recv_sge = 1;
        init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        init_attr.qp_type = IB_QPT_RC;
        init_attr.cap.max_send_wr = max_send_wr;
        if (ib_conn->pi_support)
                init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
        iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);

        ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
        if (ret)
                goto out_err;

        ib_conn->qp = ib_conn->cma_id->qp;
        iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
                  ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
        return ret;

out_err:
        ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
cq_err:
        iser_err("unable to alloc mem or create resource, err %d\n", ret);

        return ret;
}
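
/*
 * Look up an iser_device by the node GUID of the resolved IB device.
 * If no matching device exists yet, allocate one and initialize its
 * IB resources.
 */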
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
        struct iser_device *device;

        mutex_lock(&ig.device_list_mutex);

        list_for_each_entry(device, &ig.device_list, ig_list)
                /* find if there's a match using the node GUID */
                if (device->ib_device->node_guid == cma_id->device->node_guid)
                        goto inc_refcnt;

        device = kzalloc(sizeof *device, GFP_KERNEL);
        if (!device)
                goto out;

        device->ib_device = cma_id->device;
        /* init the device and link it into the global device list */
        if (iser_create_device_ib_res(device)) {
                kfree(device);
                device = NULL;
                goto out;
        }
        list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
        device->refcount++;
out:
        mutex_unlock(&ig.device_list_mutex);
        return device;
}
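
/* if there's no more demand for this device, release it */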
static void iser_device_try_release(struct iser_device *device)
{
        mutex_lock(&ig.device_list_mutex);
        device->refcount--;
        iser_info("device %p refcount %d\n", device, device->refcount);
        if (!device->refcount) {
                iser_free_device_ib_res(device);
                list_del(&device->ig_list);
                kfree(device);
        }
        mutex_unlock(&ig.device_list_mutex);
}
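
/*
 * Called with state mutex held
 */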
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
                                     enum iser_conn_state comp,
                                     enum iser_conn_state exch)
{
        int ret;

        ret = (iser_conn->state == comp);
        if (ret)
                iser_conn->state = exch;

        return ret;
}

void iser_release_work(struct work_struct *work)
{
        struct iser_conn *iser_conn;

        iser_conn = container_of(work, struct iser_conn, release_work);

        /* Wait for conn_stop to complete */
        wait_for_completion(&iser_conn->stop_completion);
        /* Wait for IB resources cleanup to complete */
        wait_for_completion(&iser_conn->ib_completion);

        mutex_lock(&iser_conn->state_mutex);
        iser_conn->state = ISER_CONN_DOWN;
        mutex_unlock(&iser_conn->state_mutex);

        iser_conn_release(iser_conn);
}
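
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to also release the iser device and the
 *     memory regions pool (used on connection release and DEVICE_REMOVAL)
 *
 * This routine is called with the iser state mutex held; resources are
 * checked and cleared, so it is safe to invoke it more than once.
 */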
static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);

        if (ib_conn->qp) {
                rdma_destroy_qp(ib_conn->cma_id);
                ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
                ib_conn->qp = NULL;
        }

        if (destroy) {
                if (iser_conn->rx_descs)
                        iser_free_rx_descriptors(iser_conn);

                if (device) {
                        iser_device_try_release(device);
                        ib_conn->device = NULL;
                }
        }
}
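
/**
 * iser_conn_release - Frees all conn objects and deallocs conn descriptor
 * @iser_conn: iSER connection context
 */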
void iser_conn_release(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;

        mutex_lock(&ig.connlist_mutex);
        list_del(&iser_conn->conn_list);
        mutex_unlock(&ig.connlist_mutex);

        mutex_lock(&iser_conn->state_mutex);

        if (iser_conn->state != ISER_CONN_DOWN) {
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
                iscsi_destroy_endpoint(iser_conn->ep);
                iser_conn->state = ISER_CONN_DOWN;
        }

        /*
         * In case we never got to the bind stage, we still need to
         * release the IB resources (which are safe to release more than once).
         */
        iser_free_ib_conn_res(iser_conn, true);
        mutex_unlock(&iser_conn->state_mutex);

        if (ib_conn->cma_id) {
                rdma_destroy_id(ib_conn->cma_id);
                ib_conn->cma_id = NULL;
        }

        kfree(iser_conn);
}
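
/**
 * iser_conn_terminate - triggers the disconnect procedures and drains the QP
 * @iser_conn: iSER connection context
 *
 * Called with state mutex held.
 *
 * Return: 1 if termination was started, 0 if the connection was not up.
 */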
int iser_conn_terminate(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        int err = 0;

        /* terminate the iser conn only if the conn state is UP */
        if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
                                       ISER_CONN_TERMINATING))
                return 0;

        iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

        /* suspend queuing of new iscsi commands */
        if (iser_conn->iscsi_conn)
                iscsi_suspend_queue(iser_conn->iscsi_conn);

        /*
         * In case the CM id was not already torn down (e.g. the peer
         * initiated the disconnection), issue the disconnect here.
         */
        if (ib_conn->cma_id) {
                err = rdma_disconnect(ib_conn->cma_id);
                if (err)
                        iser_err("Failed to disconnect, conn: 0x%p err %d\n",
                                 iser_conn, err);

                /* block until all flush errors are consumed */
                ib_drain_qp(ib_conn->qp);
        }

        return 1;
}
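
/*
 * Called with state mutex held
 */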
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
        struct iser_conn *iser_conn;

        iser_conn = cma_id->context;
        iser_conn->state = ISER_CONN_TERMINATING;
}

static void iser_calc_scsi_params(struct iser_conn *iser_conn,
                                  unsigned int max_sectors)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct ib_device_attr *attr = &device->ib_device->attrs;
        unsigned short sg_tablesize, sup_sg_tablesize;
        unsigned short reserved_mr_pages;
        u32 max_num_sg;

        /*
         * FRs without SG_GAPS can only map up to a (device) page per entry,
         * but if the first entry is misaligned we'll end up using two entries
         * (head and tail) for a single page worth of data, so one additional
         * entry is required.
         */
        if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
                reserved_mr_pages = 0;
        else
                reserved_mr_pages = 1;

        if (iser_conn->ib_conn.pi_support)
                max_num_sg = attr->max_pi_fast_reg_page_list_len;
        else
                max_num_sg = attr->max_fast_reg_page_list_len;

        sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
        sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
                                 max_num_sg - reserved_mr_pages);
        iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
        iser_conn->pages_per_mr =
                iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
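
/*
 * Called with state mutex held
 */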
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
        struct iser_device *device;
        struct iser_conn *iser_conn;
        struct ib_conn *ib_conn;
        int ret;

        iser_conn = cma_id->context;
        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        ib_conn = &iser_conn->ib_conn;
        device = iser_device_find_by_ib_device(cma_id);
        if (!device) {
                iser_err("device lookup/creation failed\n");
                iser_connect_error(cma_id);
                return;
        }

        ib_conn->device = device;

        /* connection T10-PI support */
        if (iser_pi_enable) {
                if (!(device->ib_device->attrs.kernel_cap_flags &
                      IBK_INTEGRITY_HANDOVER)) {
                        iser_warn("T10-PI requested but not supported on %s, "
                                  "continue without T10-PI\n",
                                  dev_name(&ib_conn->device->ib_device->dev));
                        ib_conn->pi_support = false;
                } else {
                        ib_conn->pi_support = true;
                }
        }

        iser_calc_scsi_params(iser_conn, iser_max_sectors);

        ret = rdma_resolve_route(cma_id, 1000);
        if (ret) {
                iser_err("resolve route failed: %d\n", ret);
                iser_connect_error(cma_id);
                return;
        }
}
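
/*
 * Called with state mutex held
 */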
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
        struct rdma_conn_param conn_param;
        int ret;
        struct iser_cm_hdr req_hdr;
        struct iser_conn *iser_conn = cma_id->context;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct ib_device *ib_dev = ib_conn->device->ib_device;

        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        ret = iser_create_ib_conn_res(ib_conn);
        if (ret)
                goto failure;

        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
        conn_param.initiator_depth = 1;
        conn_param.retry_count = 7;
        conn_param.rnr_retry_count = 6;

        memset(&req_hdr, 0, sizeof(req_hdr));
        req_hdr.flags = ISER_ZBVA_NOT_SUP;
        if (!iser_always_reg)
                req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
        conn_param.private_data = (void *)&req_hdr;
        conn_param.private_data_len = sizeof(struct iser_cm_hdr);

        ret = rdma_connect_locked(cma_id, &conn_param);
        if (ret) {
                iser_err("failure connecting: %d\n", ret);
                goto failure;
        }

        return;
failure:
        iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id,
                                   const void *private_data)
{
        struct iser_conn *iser_conn;
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;

        iser_conn = cma_id->context;
        if (iser_conn->state != ISER_CONN_PENDING)
                /* bailout */
                return;

        (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
        iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

        if (private_data) {
                u8 flags = *(u8 *)private_data;

                iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
        }

        iser_info("conn %p: negotiated %s invalidation\n",
                  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

        iser_conn->state = ISER_CONN_UP;
        complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct iser_conn *iser_conn = cma_id->context;

        if (iser_conn_terminate(iser_conn)) {
                if (iser_conn->iscsi_conn)
                        iscsi_conn_failure(iser_conn->iscsi_conn,
                                           ISCSI_ERR_CONN_FAILED);
                else
                        iser_err("iscsi_iser connection isn't bound\n");
        }
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
                                 bool destroy)
{
        struct iser_conn *iser_conn = cma_id->context;

        /*
         * We are not guaranteed that we visited iser_disconnected_handler
         * by now, so call it here to be safe that we handle the CM
         * disconnect and the flush errors.
         */
        iser_disconnected_handler(cma_id);
        iser_free_ib_conn_res(iser_conn, destroy);
        complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct iser_conn *iser_conn;
        int ret = 0;

        iser_conn = cma_id->context;
        iser_info("%s (%d): status %d conn %p id %p\n",
                  rdma_event_msg(event->event), event->event,
                  event->status, cma_id->context, cma_id);

        mutex_lock(&iser_conn->state_mutex);
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                iser_addr_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                iser_route_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                iser_connected_handler(cma_id, event->param.conn.private_data);
                break;
        case RDMA_CM_EVENT_REJECTED:
                iser_info("Connection rejected: %s\n",
                          rdma_reject_msg(cma_id, event->status));
                fallthrough;
        case RDMA_CM_EVENT_ADDR_ERROR:
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
                iser_connect_error(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                iser_cleanup_handler(cma_id, false);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                /*
                 * we *must* destroy the device as we cannot rely
                 * on iscsid to be around to initiate error handling.
                 * also if we are not in state DOWN implicitly destroy
                 * the cma_id.
                 */
                iser_cleanup_handler(cma_id, true);
                if (iser_conn->state != ISER_CONN_DOWN) {
                        iser_conn->ib_conn.cma_id = NULL;
                        ret = 1;
                }
                break;
        default:
                iser_err("Unexpected RDMA CM event: %s (%d)\n",
                         rdma_event_msg(event->event), event->event);
                break;
        }
        mutex_unlock(&iser_conn->state_mutex);

        return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;

        iser_conn->state = ISER_CONN_INIT;
        init_completion(&iser_conn->stop_completion);
        init_completion(&iser_conn->ib_completion);
        init_completion(&iser_conn->up_completion);
        INIT_LIST_HEAD(&iser_conn->conn_list);
        mutex_init(&iser_conn->state_mutex);

        ib_conn->reg_cqe.done = iser_reg_comp;
}
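
/*
 * iser_connect - starts the process of connecting to the iSCSI target.
 * If @non_blocking is not set, waits until the connection is up (or has
 * failed) before returning.
 */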
int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
                 struct sockaddr *dst_addr, int non_blocking)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        int err = 0;

        mutex_lock(&iser_conn->state_mutex);

        sprintf(iser_conn->name, "%pISp", dst_addr);

        iser_info("connecting to: %s\n", iser_conn->name);

        /* the device is known only --after-- address resolution */
        ib_conn->device = NULL;

        iser_conn->state = ISER_CONN_PENDING;

        ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
                                         iser_conn, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ib_conn->cma_id)) {
                err = PTR_ERR(ib_conn->cma_id);
                iser_err("rdma_create_id failed: %d\n", err);
                goto id_failure;
        }

        err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
        if (err) {
                iser_err("rdma_resolve_addr failed: %d\n", err);
                goto addr_failure;
        }

        if (!non_blocking) {
                wait_for_completion_interruptible(&iser_conn->up_completion);

                if (iser_conn->state != ISER_CONN_UP) {
                        err = -EIO;
                        goto connect_failure;
                }
        }
        mutex_unlock(&iser_conn->state_mutex);

        mutex_lock(&ig.connlist_mutex);
        list_add(&iser_conn->conn_list, &ig.connlist);
        mutex_unlock(&ig.connlist_mutex);
        return 0;

id_failure:
        ib_conn->cma_id = NULL;
addr_failure:
        iser_conn->state = ISER_CONN_DOWN;
connect_failure:
        mutex_unlock(&iser_conn->state_mutex);
        iser_conn_release(iser_conn);
        return err;
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_login_desc *desc = &iser_conn->login_desc;
        struct ib_recv_wr wr;
        int ret;

        desc->sge.addr = desc->rsp_dma;
        desc->sge.length = ISER_RX_LOGIN_SIZE;
        desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

        desc->cqe.done = iser_login_rsp;
        wr.wr_cqe = &desc->cqe;
        wr.sg_list = &desc->sge;
        wr.num_sge = 1;
        wr.next = NULL;

        ret = ib_post_recv(ib_conn->qp, &wr, NULL);
        if (unlikely(ret))
                iser_err("ib_post_recv login failed ret=%d\n", ret);

        return ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct ib_recv_wr wr;
        int ret;

        rx_desc->cqe.done = iser_task_rsp;
        wr.wr_cqe = &rx_desc->cqe;
        wr.sg_list = &rx_desc->rx_sg;
        wr.num_sge = 1;
        wr.next = NULL;

        ret = ib_post_recv(ib_conn->qp, &wr, NULL);
        if (unlikely(ret))
                iser_err("ib_post_recv failed ret=%d\n", ret);

        return ret;
}
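
/**
 * iser_post_send - Initiate a Send DTO operation
 * @ib_conn: connection RDMA resources
 * @tx_desc: iSER TX descriptor
 *
 * Return: 0 on success, errno code on failure.
 */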
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_send_wr *wr = &tx_desc->send_wr;
        struct ib_send_wr *first_wr;
        int ret;

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      tx_desc->dma_addr, ISER_HEADERS_LEN,
                                      DMA_TO_DEVICE);

        wr->next = NULL;
        wr->wr_cqe = &tx_desc->cqe;
        wr->sg_list = tx_desc->tx_sg;
        wr->num_sge = tx_desc->num_sge;
        wr->opcode = IB_WR_SEND;
        wr->send_flags = IB_SEND_SIGNALED;

        if (tx_desc->inv_wr.next)
                first_wr = &tx_desc->inv_wr;
        else if (tx_desc->reg_wr.wr.next)
                first_wr = &tx_desc->reg_wr.wr;
        else
                first_wr = wr;

        ret = ib_post_send(ib_conn->qp, first_wr, NULL);
        if (unlikely(ret))
                iser_err("ib_post_send failed, ret:%d opcode:%d\n",
                         ret, wr->opcode);

        return ret;
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
                             enum iser_data_dir cmd_dir, sector_t *sector)
{
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
        struct iser_fr_desc *desc = reg->desc;
        unsigned long sector_size = iser_task->sc->device->sector_size;
        struct ib_mr_status mr_status;
        int ret;

        if (desc && desc->sig_protected) {
                desc->sig_protected = false;
                ret = ib_check_mr_status(desc->rsc.sig_mr,
                                         IB_MR_CHECK_SIG_STATUS, &mr_status);
                if (ret) {
                        iser_err("ib_check_mr_status failed, ret %d\n", ret);
                        /* Not a lot we can do, return ambiguous guard error */
                        *sector = 0;
                        return 0x1;
                }

                if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                        sector_t sector_off = mr_status.sig_err.sig_err_offset;

                        sector_div(sector_off, sector_size + 8);
                        *sector = scsi_get_sector(iser_task->sc) + sector_off;

                        iser_err("PI error found type %d at sector %llx "
                                 "expected %x vs actual %x\n",
                                 mr_status.sig_err.err_type,
                                 (unsigned long long)*sector,
                                 mr_status.sig_err.expected,
                                 mr_status.sig_err.actual);

                        switch (mr_status.sig_err.err_type) {
                        case IB_SIG_BAD_GUARD:
                                return 0x1;
                        case IB_SIG_BAD_REFTAG:
                                return 0x3;
                        case IB_SIG_BAD_APPTAG:
                                return 0x2;
                        }
                }
        }

        return 0;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR) {
                struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

                iser_err("%s failure: %s (%d) vend_err %#x\n", type,
                         ib_wc_status_msg(wc->status), wc->status,
                         wc->vendor_err);

                if (iser_conn->iscsi_conn)
                        iscsi_conn_failure(iser_conn->iscsi_conn,
                                           ISCSI_ERR_CONN_FAILED);
        } else {
                iser_dbg("%s failure: %s (%d)\n", type,
                         ib_wc_status_msg(wc->status), wc->status);
        }
}