0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #include <linux/module.h>
0036 #include <linux/init.h>
0037 #include <linux/slab.h>
0038 #include <linux/err.h>
0039 #include <linux/ctype.h>
0040 #include <linux/kthread.h>
0041 #include <linux/string.h>
0042 #include <linux/delay.h>
0043 #include <linux/atomic.h>
0044 #include <linux/inet.h>
0045 #include <rdma/ib_cache.h>
0046 #include <scsi/scsi_proto.h>
0047 #include <scsi/scsi_tcq.h>
0048 #include <target/target_core_base.h>
0049 #include <target/target_core_fabric.h>
0050 #include "ib_srpt.h"
0051
0052
0053 #define DRV_NAME "ib_srpt"
0054
0055 #define SRPT_ID_STRING "Linux SRP target"
0056
0057 #undef pr_fmt
0058 #define pr_fmt(fmt) DRV_NAME " " fmt
0059
0060 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
0061 MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
0062 MODULE_LICENSE("Dual BSD/GPL");
0063
0064
0065
0066
0067
0068 static u64 srpt_service_guid;
0069 static DEFINE_SPINLOCK(srpt_dev_lock);
0070 static LIST_HEAD(srpt_dev_list);
0071
0072 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
0073 module_param(srp_max_req_size, int, 0444);
0074 MODULE_PARM_DESC(srp_max_req_size,
0075 "Maximum size of SRP request messages in bytes.");
0076
0077 static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
0078 module_param(srpt_srq_size, int, 0444);
0079 MODULE_PARM_DESC(srpt_srq_size,
0080 "Shared receive queue (SRQ) size.");
0081
0082 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
0083 {
0084 return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
0085 }
0086 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0087 0444);
0088 MODULE_PARM_DESC(srpt_service_guid,
0089 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
0090
0091 static struct ib_client srpt_client;
0092
0093 static DEFINE_MUTEX(rdma_cm_mutex);
0094
0095 static u16 rdma_cm_port;
0096 static struct rdma_cm_id *rdma_cm_id;
0097 static void srpt_release_cmd(struct se_cmd *se_cmd);
0098 static void srpt_free_ch(struct kref *kref);
0099 static int srpt_queue_status(struct se_cmd *cmd);
0100 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
0101 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
0102 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
0103
0104
0105
0106
0107
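/**
 * srpt_set_ch_state - set the state of an RDMA channel
 * @ch: RDMA channel.
 * @new: New channel state.
 *
 * The state is only modified if the new state is later than the current one.
 * Returns true if and only if the channel state has been changed.
 */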
0108 static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
0109 {
0110 unsigned long flags;
0111 enum rdma_ch_state prev;
0112 bool changed = false;
0113
0114 spin_lock_irqsave(&ch->spinlock, flags);
0115 prev = ch->state;
0116 if (new > prev) {
0117 ch->state = new;
0118 changed = true;
0119 }
0120 spin_unlock_irqrestore(&ch->spinlock, flags);
0121
0122 return changed;
0123 }
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
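/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Called by the InfiniBand core when an asynchronous IB event occurs on one
 * of the ports of @handler's device.
 */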
0135 static void srpt_event_handler(struct ib_event_handler *handler,
0136 struct ib_event *event)
0137 {
0138 struct srpt_device *sdev =
0139 container_of(handler, struct srpt_device, event_handler);
0140 struct srpt_port *sport;
0141 u8 port_num;
0142
0143 pr_debug("ASYNC event= %d on device= %s\n", event->event,
0144 dev_name(&sdev->device->dev));
0145
0146 switch (event->event) {
0147 case IB_EVENT_PORT_ERR:
0148 port_num = event->element.port_num - 1;
0149 if (port_num < sdev->device->phys_port_cnt) {
0150 sport = &sdev->port[port_num];
0151 sport->lid = 0;
0152 sport->sm_lid = 0;
0153 } else {
0154 WARN(true, "event %d: port_num %d out of range 1..%d\n",
0155 event->event, port_num + 1,
0156 sdev->device->phys_port_cnt);
0157 }
0158 break;
0159 case IB_EVENT_PORT_ACTIVE:
0160 case IB_EVENT_LID_CHANGE:
0161 case IB_EVENT_PKEY_CHANGE:
0162 case IB_EVENT_SM_CHANGE:
0163 case IB_EVENT_CLIENT_REREGISTER:
0164 case IB_EVENT_GID_CHANGE:
0165
0166 port_num = event->element.port_num - 1;
0167 if (port_num < sdev->device->phys_port_cnt) {
0168 sport = &sdev->port[port_num];
0169 if (!sport->lid && !sport->sm_lid)
0170 schedule_work(&sport->work);
0171 } else {
0172 WARN(true, "event %d: port_num %d out of range 1..%d\n",
0173 event->event, port_num + 1,
0174 sdev->device->phys_port_cnt);
0175 }
0176 break;
0177 default:
0178 pr_err("received unrecognized IB event %d\n", event->event);
0179 break;
0180 }
0181 }
0182
0183
0184
0185
0186
0187
0188 static void srpt_srq_event(struct ib_event *event, void *ctx)
0189 {
0190 pr_debug("SRQ event %d\n", event->event);
0191 }
0192
0193 static const char *get_ch_state_name(enum rdma_ch_state s)
0194 {
0195 switch (s) {
0196 case CH_CONNECTING:
0197 return "connecting";
0198 case CH_LIVE:
0199 return "live";
0200 case CH_DISCONNECTING:
0201 return "disconnecting";
0202 case CH_DRAINING:
0203 return "draining";
0204 case CH_DISCONNECTED:
0205 return "disconnected";
0206 }
0207 return "???";
0208 }
0209
0210
0211
0212
0213
0214
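/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel on whose queue pair the event occurred.
 *
 * Communication-established events are forwarded to the CM; last-WQE-reached
 * events are only logged.
 */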
0215 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
0216 {
0217 pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
0218 event->event, ch, ch->sess_name, ch->qp->qp_num,
0219 get_ch_state_name(ch->state));
0220
0221 switch (event->event) {
0222 case IB_EVENT_COMM_EST:
0223 if (ch->using_rdma_cm)
0224 rdma_notify(ch->rdma_cm.cm_id, event->event);
0225 else
0226 ib_cm_notify(ch->ib_cm.cm_id, event->event);
0227 break;
0228 case IB_EVENT_QP_LAST_WQE_REACHED:
0229 pr_debug("%s-%d, state %s: received Last WQE event.\n",
0230 ch->sess_name, ch->qp->qp_num,
0231 get_ch_state_name(ch->state));
0232 break;
0233 default:
0234 pr_err("received unrecognized IB QP event %d\n", event->event);
0235 break;
0236 }
0237 }
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
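/**
 * srpt_set_ioc - write an entry into an IOUnitInfo controller list
 * @c_list: Controller list (array of four-bit elements).
 * @slot: One-based slot number.
 * @value: Four-bit value to store.
 *
 * Stores the lowest four bits of @value in element @slot of @c_list. Odd
 * slots occupy the upper nibble of a byte, even slots the lower nibble.
 */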
0248 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
0249 {
0250 u16 id;
0251 u8 tmp;
0252
0253 id = (slot - 1) / 2;
0254 if (slot & 0x1) {
0255 tmp = c_list[id] & 0xf;
0256 c_list[id] = (value << 4) | tmp;
0257 } else {
0258 tmp = c_list[id] & 0xf0;
0259 c_list[id] = (value & 0xf) | tmp;
0260 }
0261 }
0262
0263
0264
0265
0266
0267
0268
0269
0270 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
0271 {
0272 struct ib_class_port_info *cif;
0273
0274 cif = (struct ib_class_port_info *)mad->data;
0275 memset(cif, 0, sizeof(*cif));
0276 cif->base_version = 1;
0277 cif->class_version = 1;
0278
0279 ib_set_cpi_resp_time(cif, 20);
0280 mad->mad_hdr.status = 0;
0281 }
0282
0283
0284
0285
0286
0287
0288
0289
0290 static void srpt_get_iou(struct ib_dm_mad *mad)
0291 {
0292 struct ib_dm_iou_info *ioui;
0293 u8 slot;
0294 int i;
0295
0296 ioui = (struct ib_dm_iou_info *)mad->data;
0297 ioui->change_id = cpu_to_be16(1);
0298 ioui->max_controllers = 16;
0299
0300
0301 srpt_set_ioc(ioui->controller_list, 1, 1);
0302 for (i = 1, slot = 2; i < 16; i++, slot++)
0303 srpt_set_ioc(ioui->controller_list, slot, 0);
0304
0305 mad->mad_hdr.status = 0;
0306 }
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
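/**
 * srpt_get_ioc - write an IOControllerProfile into a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in the management datagram request.
 * @mad: Device management datagram to be filled in.
 */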
0318 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
0319 struct ib_dm_mad *mad)
0320 {
0321 struct srpt_device *sdev = sport->sdev;
0322 struct ib_dm_ioc_profile *iocp;
0323 int send_queue_depth;
0324
0325 iocp = (struct ib_dm_ioc_profile *)mad->data;
0326
0327 if (!slot || slot > 16) {
0328 mad->mad_hdr.status
0329 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
0330 return;
0331 }
0332
0333 if (slot > 2) {
0334 mad->mad_hdr.status
0335 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
0336 return;
0337 }
0338
0339 if (sdev->use_srq)
0340 send_queue_depth = sdev->srq_size;
0341 else
0342 send_queue_depth = min(MAX_SRPT_RQ_SIZE,
0343 sdev->device->attrs.max_qp_wr);
0344
0345 memset(iocp, 0, sizeof(*iocp));
0346 strcpy(iocp->id_string, SRPT_ID_STRING);
0347 iocp->guid = cpu_to_be64(srpt_service_guid);
0348 iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
0349 iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
0350 iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
0351 iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
0352 iocp->subsys_device_id = 0x0;
0353 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
0354 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
0355 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
0356 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
0357 iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
0358 iocp->rdma_read_depth = 4;
0359 iocp->send_size = cpu_to_be32(srp_max_req_size);
0360 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
0361 1U << 24));
0362 iocp->num_svc_entries = 1;
0363 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
0364 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
0365
0366 mad->mad_hdr.status = 0;
0367 }
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380 static void srpt_get_svc_entries(u64 ioc_guid,
0381 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
0382 {
0383 struct ib_dm_svc_entries *svc_entries;
0384
0385 WARN_ON(!ioc_guid);
0386
0387 if (!slot || slot > 16) {
0388 mad->mad_hdr.status
0389 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
0390 return;
0391 }
0392
0393 if (slot > 2 || lo > hi || hi > 1) {
0394 mad->mad_hdr.status
0395 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
0396 return;
0397 }
0398
0399 svc_entries = (struct ib_dm_svc_entries *)mad->data;
0400 memset(svc_entries, 0, sizeof(*svc_entries));
0401 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
0402 snprintf(svc_entries->service_entries[0].name,
0403 sizeof(svc_entries->service_entries[0].name),
0404 "%s%016llx",
0405 SRP_SERVICE_NAME_PREFIX,
0406 ioc_guid);
0407
0408 mad->mad_hdr.status = 0;
0409 }
0410
0411
0412
0413
0414
0415
0416
0417 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
0418 struct ib_dm_mad *rsp_mad)
0419 {
0420 u16 attr_id;
0421 u32 slot;
0422 u8 hi, lo;
0423
0424 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
0425 switch (attr_id) {
0426 case DM_ATTR_CLASS_PORT_INFO:
0427 srpt_get_class_port_info(rsp_mad);
0428 break;
0429 case DM_ATTR_IOU_INFO:
0430 srpt_get_iou(rsp_mad);
0431 break;
0432 case DM_ATTR_IOC_PROFILE:
0433 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
0434 srpt_get_ioc(sp, slot, rsp_mad);
0435 break;
0436 case DM_ATTR_SVC_ENTRIES:
0437 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
0438 hi = (u8) ((slot >> 8) & 0xff);
0439 lo = (u8) (slot & 0xff);
0440 slot = (u16) ((slot >> 16) & 0xffff);
0441 srpt_get_svc_entries(srpt_service_guid,
0442 slot, hi, lo, rsp_mad);
0443 break;
0444 default:
0445 rsp_mad->mad_hdr.status =
0446 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
0447 break;
0448 }
0449 }
0450
0451
0452
0453
0454
0455
0456 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
0457 struct ib_mad_send_wc *mad_wc)
0458 {
0459 rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
0460 ib_free_send_mad(mad_wc->send_buf);
0461 }
0462
0463
0464
0465
0466
0467
0468
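/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: MAD agent that received the MAD.
 * @send_buf: Not used.
 * @mad_wc: Work completion describing the received MAD.
 *
 * Builds and posts a response: GET requests are dispatched to
 * srpt_mgmt_method_get() and SET requests are rejected.
 */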
0469 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
0470 struct ib_mad_send_buf *send_buf,
0471 struct ib_mad_recv_wc *mad_wc)
0472 {
0473 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
0474 struct ib_ah *ah;
0475 struct ib_mad_send_buf *rsp;
0476 struct ib_dm_mad *dm_mad;
0477
0478 if (!mad_wc || !mad_wc->recv_buf.mad)
0479 return;
0480
0481 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
0482 mad_wc->recv_buf.grh, mad_agent->port_num);
0483 if (IS_ERR(ah))
0484 goto err;
0485
0486 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
0487
0488 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
0489 mad_wc->wc->pkey_index, 0,
0490 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
0491 GFP_KERNEL,
0492 IB_MGMT_BASE_VERSION);
0493 if (IS_ERR(rsp))
0494 goto err_rsp;
0495
0496 rsp->ah = ah;
0497
0498 dm_mad = rsp->mad;
0499 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
0500 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
0501 dm_mad->mad_hdr.status = 0;
0502
0503 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
0504 case IB_MGMT_METHOD_GET:
0505 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
0506 break;
0507 case IB_MGMT_METHOD_SET:
0508 dm_mad->mad_hdr.status =
0509 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
0510 break;
0511 default:
0512 dm_mad->mad_hdr.status =
0513 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
0514 break;
0515 }
0516
0517 if (!ib_post_send_mad(rsp, NULL)) {
0518 ib_free_recv_mad(mad_wc);
0519
0520 return;
0521 }
0522
0523 ib_free_send_mad(rsp);
0524
0525 err_rsp:
0526 rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
0527 err:
0528 ib_free_recv_mad(mad_wc);
0529 }
0530
0531 static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
0532 {
0533 const __be16 *g = (const __be16 *)guid;
0534
0535 return snprintf(buf, size, "%04x:%04x:%04x:%04x",
0536 be16_to_cpu(g[0]), be16_to_cpu(g[1]),
0537 be16_to_cpu(g[2]), be16_to_cpu(g[3]));
0538 }
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
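/**
 * srpt_refresh_port - configure an HCA port
 * @sport: SRPT HCA port.
 *
 * Update the cached lid, sm_lid and gid values, enable device management
 * datagram processing and register a MAD agent for the port. It is safe to
 * call this function more than once for the same port.
 */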
0550 static int srpt_refresh_port(struct srpt_port *sport)
0551 {
0552 struct ib_mad_reg_req reg_req;
0553 struct ib_port_modify port_modify;
0554 struct ib_port_attr port_attr;
0555 int ret;
0556
0557 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
0558 if (ret)
0559 return ret;
0560
0561 sport->sm_lid = port_attr.sm_lid;
0562 sport->lid = port_attr.lid;
0563
0564 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
0565 if (ret)
0566 return ret;
0567
0568 srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
0569 &sport->gid.global.interface_id);
0570 snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
0571 "0x%016llx%016llx",
0572 be64_to_cpu(sport->gid.global.subnet_prefix),
0573 be64_to_cpu(sport->gid.global.interface_id));
0574
0575 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
0576 return 0;
0577
0578 memset(&port_modify, 0, sizeof(port_modify));
0579 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
0580 port_modify.clr_port_cap_mask = 0;
0581
0582 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
0583 if (ret) {
0584 pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
0585 dev_name(&sport->sdev->device->dev), sport->port, ret);
0586 return 0;
0587 }
0588
0589 if (!sport->mad_agent) {
0590 memset(&reg_req, 0, sizeof(reg_req));
0591 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
0592 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
0593 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
0594 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
0595
0596 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
0597 sport->port,
0598 IB_QPT_GSI,
0599 &reg_req, 0,
0600 srpt_mad_send_handler,
0601 srpt_mad_recv_handler,
0602 sport, 0);
0603 if (IS_ERR(sport->mad_agent)) {
0604 pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
0605 dev_name(&sport->sdev->device->dev), sport->port,
0606 PTR_ERR(sport->mad_agent));
0607 sport->mad_agent = NULL;
0608 memset(&port_modify, 0, sizeof(port_modify));
0609 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
0610 ib_modify_port(sport->sdev->device, sport->port, 0,
0611 &port_modify);
0612
0613 }
0614 }
0615
0616 return 0;
0617 }
0618
0619
0620
0621
0622
0623
0624
0625
0626 static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
0627 {
0628 struct ib_port_modify port_modify = {
0629 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
0630 };
0631 struct srpt_port *sport;
0632 int i;
0633
0634 for (i = 1; i <= port_cnt; i++) {
0635 sport = &sdev->port[i - 1];
0636 WARN_ON(sport->port != i);
0637 if (sport->mad_agent) {
0638 ib_modify_port(sdev->device, i, 0, &port_modify);
0639 ib_unregister_mad_agent(sport->mad_agent);
0640 sport->mad_agent = NULL;
0641 }
0642 }
0643 }
0644
0645
0646
0647
0648
0649
0650
0651
0652 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
0653 int ioctx_size,
0654 struct kmem_cache *buf_cache,
0655 enum dma_data_direction dir)
0656 {
0657 struct srpt_ioctx *ioctx;
0658
0659 ioctx = kzalloc(ioctx_size, GFP_KERNEL);
0660 if (!ioctx)
0661 goto err;
0662
0663 ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
0664 if (!ioctx->buf)
0665 goto err_free_ioctx;
0666
0667 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
0668 kmem_cache_size(buf_cache), dir);
0669 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
0670 goto err_free_buf;
0671
0672 return ioctx;
0673
0674 err_free_buf:
0675 kmem_cache_free(buf_cache, ioctx->buf);
0676 err_free_ioctx:
0677 kfree(ioctx);
0678 err:
0679 return NULL;
0680 }
0681
0682
0683
0684
0685
0686
0687
0688
0689 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
0690 struct kmem_cache *buf_cache,
0691 enum dma_data_direction dir)
0692 {
0693 if (!ioctx)
0694 return;
0695
0696 ib_dma_unmap_single(sdev->device, ioctx->dma,
0697 kmem_cache_size(buf_cache), dir);
0698 kmem_cache_free(buf_cache, ioctx->buf);
0699 kfree(ioctx);
0700 }
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
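/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size (receive or send I/O context).
 * @buf_cache: I/O buffer cache from which the data buffers are allocated.
 * @alignment_offset: Offset in each ring buffer at which the SRP information
 *	unit starts.
 * @dir: DMA data direction.
 */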
0712 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
0713 int ring_size, int ioctx_size,
0714 struct kmem_cache *buf_cache,
0715 int alignment_offset,
0716 enum dma_data_direction dir)
0717 {
0718 struct srpt_ioctx **ring;
0719 int i;
0720
0721 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
0722 ioctx_size != sizeof(struct srpt_send_ioctx));
0723
0724 ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
0725 if (!ring)
0726 goto out;
0727 for (i = 0; i < ring_size; ++i) {
0728 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
0729 if (!ring[i])
0730 goto err;
0731 ring[i]->index = i;
0732 ring[i]->offset = alignment_offset;
0733 }
0734 goto out;
0735
0736 err:
0737 while (--i >= 0)
0738 srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
0739 kvfree(ring);
0740 ring = NULL;
0741 out:
0742 return ring;
0743 }
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
0754 struct srpt_device *sdev, int ring_size,
0755 struct kmem_cache *buf_cache,
0756 enum dma_data_direction dir)
0757 {
0758 int i;
0759
0760 if (!ioctx_ring)
0761 return;
0762
0763 for (i = 0; i < ring_size; ++i)
0764 srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
0765 kvfree(ioctx_ring);
0766 }
0767
0768
0769
0770
0771
0772
0773
0774
0775
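/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * The state is not modified once it has reached SRPT_STATE_DONE. Returns the
 * previous command state.
 */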
0776 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
0777 enum srpt_command_state new)
0778 {
0779 enum srpt_command_state previous;
0780
0781 previous = ioctx->state;
0782 if (previous != SRPT_STATE_DONE)
0783 ioctx->state = new;
0784
0785 return previous;
0786 }
0787
0788
0789
0790
0791
0792
0793
0794
0795
0796 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
0797 enum srpt_command_state old,
0798 enum srpt_command_state new)
0799 {
0800 enum srpt_command_state previous;
0801
0802 WARN_ON(!ioctx);
0803 WARN_ON(old == SRPT_STATE_DONE);
0804 WARN_ON(new == SRPT_STATE_NEW);
0805
0806 previous = ioctx->state;
0807 if (previous == old)
0808 ioctx->state = new;
0809
0810 return previous == old;
0811 }
0812
0813
0814
0815
0816
0817
0818
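/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context to post.
 *
 * The receive buffer is posted either on the shared receive queue or on the
 * channel queue pair, depending on whether the device uses an SRQ.
 */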
0819 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
0820 struct srpt_recv_ioctx *ioctx)
0821 {
0822 struct ib_sge list;
0823 struct ib_recv_wr wr;
0824
0825 BUG_ON(!sdev);
0826 list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
0827 list.length = srp_max_req_size;
0828 list.lkey = sdev->lkey;
0829
0830 ioctx->ioctx.cqe.done = srpt_recv_done;
0831 wr.wr_cqe = &ioctx->ioctx.cqe;
0832 wr.next = NULL;
0833 wr.sg_list = &list;
0834 wr.num_sge = 1;
0835
0836 if (sdev->use_srq)
0837 return ib_post_srq_recv(sdev->srq, &wr, NULL);
0838 else
0839 return ib_post_recv(ch->qp, &wr, NULL);
0840 }
0841
0842
0843
0844
0845
0846
0847
0848
0849
0850
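/**
 * srpt_zerolength_write - post a zero-length RDMA write on a channel
 * @ch: SRPT RDMA channel.
 *
 * The completion is handled by srpt_zerolength_write_done(): on success the
 * command wait list is processed, on failure the channel transitions to the
 * disconnected state and its release work is scheduled.
 */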
0851 static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
0852 {
0853 struct ib_rdma_wr wr = {
0854 .wr = {
0855 .next = NULL,
0856 { .wr_cqe = &ch->zw_cqe, },
0857 .opcode = IB_WR_RDMA_WRITE,
0858 .send_flags = IB_SEND_SIGNALED,
0859 }
0860 };
0861
0862 pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
0863 ch->qp->qp_num);
0864
0865 return ib_post_send(ch->qp, &wr.wr, NULL);
0866 }
0867
0868 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
0869 {
0870 struct srpt_rdma_ch *ch = wc->qp->qp_context;
0871
0872 pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
0873 wc->status);
0874
0875 if (wc->status == IB_WC_SUCCESS) {
0876 srpt_process_wait_list(ch);
0877 } else {
0878 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
0879 schedule_work(&ch->release_work);
0880 else
0881 pr_debug("%s-%d: already disconnected.\n",
0882 ch->sess_name, ch->qp->qp_num);
0883 }
0884 }
0885
0886 static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
0887 struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
0888 unsigned *sg_cnt)
0889 {
0890 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
0891 struct srpt_rdma_ch *ch = ioctx->ch;
0892 struct scatterlist *prev = NULL;
0893 unsigned prev_nents;
0894 int ret, i;
0895
0896 if (nbufs == 1) {
0897 ioctx->rw_ctxs = &ioctx->s_rw_ctx;
0898 } else {
0899 ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
0900 GFP_KERNEL);
0901 if (!ioctx->rw_ctxs)
0902 return -ENOMEM;
0903 }
0904
0905 for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
0906 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
0907 u64 remote_addr = be64_to_cpu(db->va);
0908 u32 size = be32_to_cpu(db->len);
0909 u32 rkey = be32_to_cpu(db->key);
0910
0911 ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
0912 i < nbufs - 1);
0913 if (ret)
0914 goto unwind;
0915
0916 ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
0917 ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
0918 if (ret < 0) {
0919 target_free_sgl(ctx->sg, ctx->nents);
0920 goto unwind;
0921 }
0922
0923 ioctx->n_rdma += ret;
0924 ioctx->n_rw_ctx++;
0925
0926 if (prev) {
0927 sg_unmark_end(&prev[prev_nents - 1]);
0928 sg_chain(prev, prev_nents + 1, ctx->sg);
0929 } else {
0930 *sg = ctx->sg;
0931 }
0932
0933 prev = ctx->sg;
0934 prev_nents = ctx->nents;
0935
0936 *sg_cnt += ctx->nents;
0937 }
0938
0939 return 0;
0940
0941 unwind:
0942 while (--i >= 0) {
0943 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
0944
0945 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
0946 ctx->sg, ctx->nents, dir);
0947 target_free_sgl(ctx->sg, ctx->nents);
0948 }
0949 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
0950 kfree(ioctx->rw_ctxs);
0951 return ret;
0952 }
0953
0954 static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
0955 struct srpt_send_ioctx *ioctx)
0956 {
0957 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
0958 int i;
0959
0960 for (i = 0; i < ioctx->n_rw_ctx; i++) {
0961 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
0962
0963 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
0964 ctx->sg, ctx->nents, dir);
0965 target_free_sgl(ctx->sg, ctx->nents);
0966 }
0967
0968 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
0969 kfree(ioctx->rw_ctxs);
0970 }
0971
0972 static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
0973 {
0974
0975
0976
0977
0978
0979 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
0980 !__same_type(srp_cmd->add_data[0], (u8)0));
0981
0982
0983
0984
0985
0986
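/*
 * The two least significant bits of the ADDITIONAL CDB LENGTH field are
 * reserved; masking them off yields the additional CDB length in bytes and
 * hence the offset of the data descriptors relative to add_data.
 */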
0987 return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
0988 }
0989
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001
1002
1003
1004
1005
1006
1007
1008
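/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @recv_ioctx: I/O context associated with the received SRP_CMD.
 * @ioctx: Send I/O context that will be used to respond to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: [out] DMA data direction derived from the buffer format bits.
 * @sg: [out] scatterlist for the parsed SRP_CMD.
 * @sg_cnt: [out] number of entries in @sg.
 * @data_len: [out] total data length described by the descriptors.
 * @imm_data_offset: Offset in the SRP_CMD at which immediate data starts.
 *
 * Returns 0 upon success and a negative errno on failure, e.g. -EINVAL for
 * inconsistent descriptors and -EIO when less immediate data was received
 * than announced.
 */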
1009 static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
1010 struct srpt_send_ioctx *ioctx,
1011 struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
1012 struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
1013 u16 imm_data_offset)
1014 {
1015 BUG_ON(!dir);
1016 BUG_ON(!data_len);
1017
1018
1019
1020
1021
1022
1023 if (srp_cmd->buf_fmt & 0xf)
1024
1025 *dir = DMA_FROM_DEVICE;
1026 else if (srp_cmd->buf_fmt >> 4)
1027
1028 *dir = DMA_TO_DEVICE;
1029 else
1030 *dir = DMA_NONE;
1031
1032
1033 ioctx->cmd.data_direction = *dir;
1034
1035 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
1036 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
1037 struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
1038
1039 *data_len = be32_to_cpu(db->len);
1040 return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
1041 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
1042 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
1043 struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
1044 int nbufs = be32_to_cpu(idb->table_desc.len) /
1045 sizeof(struct srp_direct_buf);
1046
1047 if (nbufs >
1048 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
1049 pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
1050 srp_cmd->data_out_desc_cnt,
1051 srp_cmd->data_in_desc_cnt,
1052 be32_to_cpu(idb->table_desc.len),
1053 sizeof(struct srp_direct_buf));
1054 return -EINVAL;
1055 }
1056
1057 *data_len = be32_to_cpu(idb->len);
1058 return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
1059 sg, sg_cnt);
1060 } else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
1061 struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
1062 void *data = (void *)srp_cmd + imm_data_offset;
1063 uint32_t len = be32_to_cpu(imm_buf->len);
1064 uint32_t req_size = imm_data_offset + len;
1065
1066 if (req_size > srp_max_req_size) {
1067 pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
1068 imm_data_offset, len, srp_max_req_size);
1069 return -EINVAL;
1070 }
1071 if (recv_ioctx->byte_len < req_size) {
1072 pr_err("Received too few data - %d < %d\n",
1073 recv_ioctx->byte_len, req_size);
1074 return -EIO;
1075 }
1076
1077
1078
1079
1080 if ((void *)(imm_buf + 1) > (void *)data) {
1081 pr_err("Received invalid write request\n");
1082 return -EINVAL;
1083 }
1084 *data_len = len;
1085 ioctx->recv_ioctx = recv_ioctx;
1086 if ((uintptr_t)data & 511) {
1087 pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
1088 return -EINVAL;
1089 }
1090 sg_init_one(&ioctx->imm_sg, data, len);
1091 *sg = &ioctx->imm_sg;
1092 *sg_cnt = 1;
1093 return 0;
1094 } else {
1095 *data_len = 0;
1096 return 0;
1097 }
1098 }
1099
1100
1101
1102
1103
1104
1105
1106
1107
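/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Transitions the queue pair into the INIT state and enables local write
 * access. Only used for the ib_cm path; for rdma_cm connections the CM core
 * performs the QP state transitions.
 */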
1108 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1109 {
1110 struct ib_qp_attr *attr;
1111 int ret;
1112
1113 WARN_ON_ONCE(ch->using_rdma_cm);
1114
1115 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1116 if (!attr)
1117 return -ENOMEM;
1118
1119 attr->qp_state = IB_QPS_INIT;
1120 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1121 attr->port_num = ch->sport->port;
1122
1123 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1124 ch->pkey, &attr->pkey_index);
1125 if (ret < 0)
1126 pr_err("Translating pkey %#x failed (%d) - using index 0\n",
1127 ch->pkey, ret);
1128
1129 ret = ib_modify_qp(qp, attr,
1130 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
1131 IB_QP_PKEY_INDEX);
1132
1133 kfree(attr);
1134 return ret;
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1149 {
1150 struct ib_qp_attr qp_attr;
1151 int attr_mask;
1152 int ret;
1153
1154 WARN_ON_ONCE(ch->using_rdma_cm);
1155
1156 qp_attr.qp_state = IB_QPS_RTR;
1157 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1158 if (ret)
1159 goto out;
1160
1161 qp_attr.max_dest_rd_atomic = 4;
1162
1163 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1164
1165 out:
1166 return ret;
1167 }
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1181 {
1182 struct ib_qp_attr qp_attr;
1183 int attr_mask;
1184 int ret;
1185
1186 qp_attr.qp_state = IB_QPS_RTS;
1187 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1188 if (ret)
1189 goto out;
1190
1191 qp_attr.max_rd_atomic = 4;
1192
1193 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1194
1195 out:
1196 return ret;
1197 }
1198
1199
1200
1201
1202
1203 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1204 {
1205 struct ib_qp_attr qp_attr;
1206
1207 qp_attr.qp_state = IB_QPS_ERR;
1208 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1209 }
1210
1211
1212
1213
1214
1215 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1216 {
1217 struct srpt_send_ioctx *ioctx;
1218 int tag, cpu;
1219
1220 BUG_ON(!ch);
1221
1222 tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
1223 if (tag < 0)
1224 return NULL;
1225
1226 ioctx = ch->ioctx_ring[tag];
1227 BUG_ON(ioctx->ch != ch);
1228 ioctx->state = SRPT_STATE_NEW;
1229 WARN_ON_ONCE(ioctx->recv_ioctx);
1230 ioctx->n_rdma = 0;
1231 ioctx->n_rw_ctx = 0;
1232 ioctx->queue_status_only = false;
1233
1234
1235
1236
1237 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1238 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1239 ioctx->cmd.map_tag = tag;
1240 ioctx->cmd.map_cpu = cpu;
1241
1242 return ioctx;
1243 }
1244
1245
1246
1247
1248
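/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx: I/O context associated with the SCSI command.
 *
 * Adjusts the command state and, depending on the previous state, either
 * reports the failure to the target core or frees the command. Returns the
 * state the command was in before the abort.
 */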
1249 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1250 {
1251 enum srpt_command_state state;
1252
1253 BUG_ON(!ioctx);
1254
1255
1256
1257
1258
1259
1260 state = ioctx->state;
1261 switch (state) {
1262 case SRPT_STATE_NEED_DATA:
1263 ioctx->state = SRPT_STATE_DATA_IN;
1264 break;
1265 case SRPT_STATE_CMD_RSP_SENT:
1266 case SRPT_STATE_MGMT_RSP_SENT:
1267 ioctx->state = SRPT_STATE_DONE;
1268 break;
1269 default:
1270 WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
1271 __func__, state);
1272 break;
1273 }
1274
1275 pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
1276 ioctx->state, ioctx->cmd.tag);
1277
1278 switch (state) {
1279 case SRPT_STATE_NEW:
1280 case SRPT_STATE_DATA_IN:
1281 case SRPT_STATE_MGMT:
1282 case SRPT_STATE_DONE:
1283
1284
1285
1286
1287 break;
1288 case SRPT_STATE_NEED_DATA:
1289 pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1290 transport_generic_request_failure(&ioctx->cmd,
1291 TCM_CHECK_CONDITION_ABORT_CMD);
1292 break;
1293 case SRPT_STATE_CMD_RSP_SENT:
1294
1295
1296
1297
1298 transport_generic_free_cmd(&ioctx->cmd, 0);
1299 break;
1300 case SRPT_STATE_MGMT_RSP_SENT:
1301 transport_generic_free_cmd(&ioctx->cmd, 0);
1302 break;
1303 default:
1304 WARN(1, "Unexpected command state (%d)", state);
1305 break;
1306 }
1307
1308 return state;
1309 }
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
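/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue on which the completion was received.
 * @wc: Work completion.
 *
 * Called when an RDMA read that fetched data from the initiator has
 * completed. On success the SCSI command is handed to the target core for
 * execution; on failure the command is aborted.
 */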
1321 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1322 {
1323 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1324 struct srpt_send_ioctx *ioctx =
1325 container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
1326
1327 WARN_ON(ioctx->n_rdma <= 0);
1328 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1329 ioctx->n_rdma = 0;
1330
1331 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1332 pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1333 ioctx, wc->status);
1334 srpt_abort_cmd(ioctx);
1335 return;
1336 }
1337
1338 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1339 SRPT_STATE_DATA_IN))
1340 target_execute_cmd(&ioctx->cmd);
1341 else
1342 pr_err("%s[%d]: wrong state = %d\n", __func__,
1343 __LINE__, ioctx->state);
1344 }
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
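/**
 * srpt_build_cmd_rsp - build a SRP_RSP response IU
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request.
 * @tag: Tag of the request for which this response is being generated.
 * @status: SCSI status value for the STATUS field of the SRP_RSP IU.
 *
 * Returns the size in bytes of the SRP_RSP response, including any appended
 * sense data.
 */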
1361 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1362 struct srpt_send_ioctx *ioctx, u64 tag,
1363 int status)
1364 {
1365 struct se_cmd *cmd = &ioctx->cmd;
1366 struct srp_rsp *srp_rsp;
1367 const u8 *sense_data;
1368 int sense_data_len, max_sense_len;
1369 u32 resid = cmd->residual_count;
1370
1371
1372
1373
1374
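/* The lowest bit of every SAM-defined SCSI status code is zero. */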
1375 WARN_ON(status & 1);
1376
1377 srp_rsp = ioctx->ioctx.buf;
1378 BUG_ON(!srp_rsp);
1379
1380 sense_data = ioctx->sense_data;
1381 sense_data_len = ioctx->cmd.scsi_sense_length;
1382 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1383
1384 memset(srp_rsp, 0, sizeof(*srp_rsp));
1385 srp_rsp->opcode = SRP_RSP;
1386 srp_rsp->req_lim_delta =
1387 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1388 srp_rsp->tag = tag;
1389 srp_rsp->status = status;
1390
1391 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1392 if (cmd->data_direction == DMA_TO_DEVICE) {
1393
1394 srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
1395 srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
1396 } else if (cmd->data_direction == DMA_FROM_DEVICE) {
1397
1398 srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
1399 srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
1400 }
1401 } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1402 if (cmd->data_direction == DMA_TO_DEVICE) {
1403
1404 srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
1405 srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
1406 } else if (cmd->data_direction == DMA_FROM_DEVICE) {
1407
1408 srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
1409 srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
1410 }
1411 }
1412
1413 if (sense_data_len) {
1414 BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1415 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1416 if (sense_data_len > max_sense_len) {
1417 pr_warn("truncated sense data from %d to %d bytes\n",
1418 sense_data_len, max_sense_len);
1419 sense_data_len = max_sense_len;
1420 }
1421
1422 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1423 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1424 memcpy(srp_rsp + 1, sense_data, sense_data_len);
1425 }
1426
1427 return sizeof(*srp_rsp) + sense_data_len;
1428 }
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
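/**
 * srpt_build_tskmgmt_rsp - build a task management response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag: Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 */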
1443 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1444 struct srpt_send_ioctx *ioctx,
1445 u8 rsp_code, u64 tag)
1446 {
1447 struct srp_rsp *srp_rsp;
1448 int resp_data_len;
1449 int resp_len;
1450
1451 resp_data_len = 4;
1452 resp_len = sizeof(*srp_rsp) + resp_data_len;
1453
1454 srp_rsp = ioctx->ioctx.buf;
1455 BUG_ON(!srp_rsp);
1456 memset(srp_rsp, 0, sizeof(*srp_rsp));
1457
1458 srp_rsp->opcode = SRP_RSP;
1459 srp_rsp->req_lim_delta =
1460 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1461 srp_rsp->tag = tag;
1462
1463 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1464 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1465 srp_rsp->data[3] = rsp_code;
1466
1467 return resp_len;
1468 }
1469
1470 static int srpt_check_stop_free(struct se_cmd *cmd)
1471 {
1472 struct srpt_send_ioctx *ioctx = container_of(cmd,
1473 struct srpt_send_ioctx, cmd);
1474
1475 return target_put_sess_cmd(&ioctx->cmd);
1476 }
1477
1478
1479
1480
1481
1482
1483
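/**
 * srpt_handle_cmd - process a SRP_CMD information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context containing the SRP_CMD IU.
 * @send_ioctx: Send I/O context that will be used for the response.
 *
 * Parses the data descriptors, sets up the scatterlist and submits the
 * command to the SCSI target core.
 */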
1484 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1485 struct srpt_recv_ioctx *recv_ioctx,
1486 struct srpt_send_ioctx *send_ioctx)
1487 {
1488 struct se_cmd *cmd;
1489 struct srp_cmd *srp_cmd;
1490 struct scatterlist *sg = NULL;
1491 unsigned sg_cnt = 0;
1492 u64 data_len;
1493 enum dma_data_direction dir;
1494 int rc;
1495
1496 BUG_ON(!send_ioctx);
1497
1498 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1499 cmd = &send_ioctx->cmd;
1500 cmd->tag = srp_cmd->tag;
1501
1502 switch (srp_cmd->task_attr) {
1503 case SRP_CMD_SIMPLE_Q:
1504 cmd->sam_task_attr = TCM_SIMPLE_TAG;
1505 break;
1506 case SRP_CMD_ORDERED_Q:
1507 default:
1508 cmd->sam_task_attr = TCM_ORDERED_TAG;
1509 break;
1510 case SRP_CMD_HEAD_OF_Q:
1511 cmd->sam_task_attr = TCM_HEAD_TAG;
1512 break;
1513 case SRP_CMD_ACA:
1514 cmd->sam_task_attr = TCM_ACA_TAG;
1515 break;
1516 }
1517
1518 rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
1519 &sg, &sg_cnt, &data_len, ch->imm_data_offset);
1520 if (rc) {
1521 if (rc != -EAGAIN) {
1522 pr_err("0x%llx: parsing SRP descriptor table failed.\n",
1523 srp_cmd->tag);
1524 }
1525 goto busy;
1526 }
1527
1528 rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
1529 scsilun_to_int(&srp_cmd->lun), data_len,
1530 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
1531 if (rc != 0) {
1532 pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
1533 srp_cmd->tag);
1534 goto busy;
1535 }
1536
1537 if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
1538 GFP_KERNEL))
1539 return;
1540
1541 target_submit(cmd);
1542 return;
1543
1544 busy:
1545 target_send_busy(cmd);
1546 }
1547
1548 static int srp_tmr_to_tcm(int fn)
1549 {
1550 switch (fn) {
1551 case SRP_TSK_ABORT_TASK:
1552 return TMR_ABORT_TASK;
1553 case SRP_TSK_ABORT_TASK_SET:
1554 return TMR_ABORT_TASK_SET;
1555 case SRP_TSK_CLEAR_TASK_SET:
1556 return TMR_CLEAR_TASK_SET;
1557 case SRP_TSK_LUN_RESET:
1558 return TMR_LUN_RESET;
1559 case SRP_TSK_CLEAR_ACA:
1560 return TMR_CLEAR_ACA;
1561 default:
1562 return -1;
1563 }
1564 }
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
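/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context containing the SRP_TSK_MGMT IU.
 * @send_ioctx: Send I/O context that will be used for the response.
 *
 * Translates the SRP task management function into a TCM TMR and submits it
 * to the target core; rejected requests are answered immediately.
 */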
1577 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1578 struct srpt_recv_ioctx *recv_ioctx,
1579 struct srpt_send_ioctx *send_ioctx)
1580 {
1581 struct srp_tsk_mgmt *srp_tsk;
1582 struct se_cmd *cmd;
1583 struct se_session *sess = ch->sess;
1584 int tcm_tmr;
1585 int rc;
1586
1587 BUG_ON(!send_ioctx);
1588
1589 srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1590 cmd = &send_ioctx->cmd;
1591
1592 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
1593 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
1594 ch->sess);
1595
1596 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1597 send_ioctx->cmd.tag = srp_tsk->tag;
1598 tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1599 rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
1600 scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
1601 GFP_KERNEL, srp_tsk->task_tag,
1602 TARGET_SCF_ACK_KREF);
1603 if (rc != 0) {
1604 send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1605 cmd->se_tfo->queue_tm_rsp(cmd);
1606 }
1607 return;
1608 }
1609
1610
1611
1612
1613
1614
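/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 *
 * Returns true if the IU has been processed and false if it has been queued
 * on the command wait list, e.g. because the channel is still connecting or
 * because no send I/O context is available.
 */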
1615 static bool
1616 srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
1617 {
1618 struct srpt_send_ioctx *send_ioctx = NULL;
1619 struct srp_cmd *srp_cmd;
1620 bool res = false;
1621 u8 opcode;
1622
1623 BUG_ON(!ch);
1624 BUG_ON(!recv_ioctx);
1625
1626 if (unlikely(ch->state == CH_CONNECTING))
1627 goto push;
1628
1629 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1630 recv_ioctx->ioctx.dma,
1631 recv_ioctx->ioctx.offset + srp_max_req_size,
1632 DMA_FROM_DEVICE);
1633
1634 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1635 opcode = srp_cmd->opcode;
1636 if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
1637 send_ioctx = srpt_get_send_ioctx(ch);
1638 if (unlikely(!send_ioctx))
1639 goto push;
1640 }
1641
1642 if (!list_empty(&recv_ioctx->wait_list)) {
1643 WARN_ON_ONCE(!ch->processing_wait_list);
1644 list_del_init(&recv_ioctx->wait_list);
1645 }
1646
1647 switch (opcode) {
1648 case SRP_CMD:
1649 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1650 break;
1651 case SRP_TSK_MGMT:
1652 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1653 break;
1654 case SRP_I_LOGOUT:
1655 pr_err("Not yet implemented: SRP_I_LOGOUT\n");
1656 break;
1657 case SRP_CRED_RSP:
1658 pr_debug("received SRP_CRED_RSP\n");
1659 break;
1660 case SRP_AER_RSP:
1661 pr_debug("received SRP_AER_RSP\n");
1662 break;
1663 case SRP_RSP:
1664 pr_err("Received SRP_RSP\n");
1665 break;
1666 default:
1667 pr_err("received IU with unknown opcode 0x%x\n", opcode);
1668 break;
1669 }
1670
1671 if (!send_ioctx || !send_ioctx->recv_ioctx)
1672 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1673 res = true;
1674
1675 out:
1676 return res;
1677
1678 push:
1679 if (list_empty(&recv_ioctx->wait_list)) {
1680 WARN_ON_ONCE(ch->processing_wait_list);
1681 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1682 }
1683 goto out;
1684 }
1685
1686 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1687 {
1688 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1689 struct srpt_recv_ioctx *ioctx =
1690 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1691
1692 if (wc->status == IB_WC_SUCCESS) {
1693 int req_lim;
1694
1695 req_lim = atomic_dec_return(&ch->req_lim);
1696 if (unlikely(req_lim < 0))
1697 pr_err("req_lim = %d < 0\n", req_lim);
1698 ioctx->byte_len = wc->byte_len;
1699 srpt_handle_new_iu(ch, ioctx);
1700 } else {
1701 pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1702 ioctx, wc->status);
1703 }
1704 }
1705
1706
1707
1708
1709
1710
1711 static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1712 {
1713 struct srpt_recv_ioctx *recv_ioctx, *tmp;
1714
1715 WARN_ON_ONCE(ch->state == CH_CONNECTING);
1716
1717 if (list_empty(&ch->cmd_wait_list))
1718 return;
1719
1720 WARN_ON_ONCE(ch->processing_wait_list);
1721 ch->processing_wait_list = true;
1722 list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
1723 wait_list) {
1724 if (!srpt_handle_new_iu(ch, recv_ioctx))
1725 break;
1726 }
1727 ch->processing_wait_list = false;
1728 }
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
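/**
 * srpt_send_done - send completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Invoked when a response IU has been sent to the initiator. Returns the
 * send queue slots reserved for the response and for preceding RDMA
 * operations, hands the command back to the target core and processes the
 * command wait list.
 */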
1747 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
1748 {
1749 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1750 struct srpt_send_ioctx *ioctx =
1751 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1752 enum srpt_command_state state;
1753
1754 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1755
1756 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1757 state != SRPT_STATE_MGMT_RSP_SENT);
1758
1759 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1760
1761 if (wc->status != IB_WC_SUCCESS)
1762 pr_info("sending response for ioctx 0x%p failed with status %d\n",
1763 ioctx, wc->status);
1764
1765 if (state != SRPT_STATE_DONE) {
1766 transport_generic_free_cmd(&ioctx->cmd, 0);
1767 } else {
1768 pr_err("IB completion has been received too late for wr_id = %u.\n",
1769 ioctx->ioctx.index);
1770 }
1771
1772 srpt_process_wait_list(ch);
1773 }
1774
1775
1776
1777
1778
1779 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1780 {
1781 struct ib_qp_init_attr *qp_init;
1782 struct srpt_port *sport = ch->sport;
1783 struct srpt_device *sdev = sport->sdev;
1784 const struct ib_device_attr *attrs = &sdev->device->attrs;
1785 int sq_size = sport->port_attrib.srp_sq_size;
1786 int i, ret;
1787
1788 WARN_ON(ch->rq_size < 1);
1789
1790 ret = -ENOMEM;
1791 qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
1792 if (!qp_init)
1793 goto out;
1794
1795 retry:
1796 ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
1797 IB_POLL_WORKQUEUE);
1798 if (IS_ERR(ch->cq)) {
1799 ret = PTR_ERR(ch->cq);
1800 pr_err("failed to create CQ cqe= %d ret= %d\n",
1801 ch->rq_size + sq_size, ret);
1802 goto out;
1803 }
1804 ch->cq_size = ch->rq_size + sq_size;
1805
1806 qp_init->qp_context = (void *)ch;
1807 qp_init->event_handler
1808 = (void(*)(struct ib_event *, void*))srpt_qp_event;
1809 qp_init->send_cq = ch->cq;
1810 qp_init->recv_cq = ch->cq;
1811 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1812 qp_init->qp_type = IB_QPT_RC;
1813
1814
1815
1816
1817
1818
1819
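/*
 * Split the send queue: half of sq_size for plain SEND work requests
 * (responses) and half for RDMA R/W contexts (data transfers).
 */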
1820 qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
1821 qp_init->cap.max_rdma_ctxs = sq_size / 2;
1822 qp_init->cap.max_send_sge = attrs->max_send_sge;
1823 qp_init->cap.max_recv_sge = 1;
1824 qp_init->port_num = ch->sport->port;
1825 if (sdev->use_srq)
1826 qp_init->srq = sdev->srq;
1827 else
1828 qp_init->cap.max_recv_wr = ch->rq_size;
1829
1830 if (ch->using_rdma_cm) {
1831 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
1832 ch->qp = ch->rdma_cm.cm_id->qp;
1833 } else {
1834 ch->qp = ib_create_qp(sdev->pd, qp_init);
1835 if (!IS_ERR(ch->qp)) {
1836 ret = srpt_init_ch_qp(ch, ch->qp);
1837 if (ret)
1838 ib_destroy_qp(ch->qp);
1839 } else {
1840 ret = PTR_ERR(ch->qp);
1841 }
1842 }
1843 if (ret) {
1844 bool retry = sq_size > MIN_SRPT_SQ_SIZE;
1845
1846 if (retry) {
1847 pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
1848 sq_size, ret);
1849 ib_cq_pool_put(ch->cq, ch->cq_size);
1850 sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
1851 goto retry;
1852 } else {
1853 pr_err("failed to create queue pair with sq_size = %d (%d)\n",
1854 sq_size, ret);
1855 goto err_destroy_cq;
1856 }
1857 }
1858
1859 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1860
1861 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
1862 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1863 qp_init->cap.max_send_wr, ch);
1864
1865 if (!sdev->use_srq)
1866 for (i = 0; i < ch->rq_size; i++)
1867 srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
1868
1869 out:
1870 kfree(qp_init);
1871 return ret;
1872
1873 err_destroy_cq:
1874 ch->qp = NULL;
1875 ib_cq_pool_put(ch->cq, ch->cq_size);
1876 goto out;
1877 }
1878
1879 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1880 {
1881 ib_destroy_qp(ch->qp);
1882 ib_cq_pool_put(ch->cq, ch->cq_size);
1883 }
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
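/**
 * srpt_close_ch - close an SRPT RDMA channel
 * @ch: Channel to close.
 *
 * Moves the queue pair into the error state and posts a zero-length write so
 * that channel resources are released once the QP has been drained. Returns
 * true if and only if the channel state has been changed to CH_DRAINING.
 */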
1895 static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1896 {
1897 int ret;
1898
1899 if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1900 pr_debug("%s: already closed\n", ch->sess_name);
1901 return false;
1902 }
1903
1904 kref_get(&ch->kref);
1905
1906 ret = srpt_ch_qp_err(ch);
1907 if (ret < 0)
1908 pr_err("%s-%d: changing queue pair into error state failed: %d\n",
1909 ch->sess_name, ch->qp->qp_num, ret);
1910
1911 ret = srpt_zerolength_write(ch);
1912 if (ret < 0) {
1913 pr_err("%s-%d: queuing zero-length write failed: %d\n",
1914 ch->sess_name, ch->qp->qp_num, ret);
1915 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1916 schedule_work(&ch->release_work);
1917 else
1918 WARN_ON_ONCE(true);
1919 }
1920
1921 kref_put(&ch->kref, srpt_free_ch);
1922
1923 return true;
1924 }
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
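/**
 * srpt_disconnect_ch - initiate disconnection of an SRPT RDMA channel
 * @ch: Channel to disconnect.
 *
 * Returns -ENOTCONN if the channel was not in a connected state and 0 if the
 * DREQ (ib_cm) or rdma_disconnect() (rdma_cm) call succeeded or the channel
 * could be closed directly.
 */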
1935 static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1936 {
1937 int ret;
1938
1939 if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1940 return -ENOTCONN;
1941
1942 if (ch->using_rdma_cm) {
1943 ret = rdma_disconnect(ch->rdma_cm.cm_id);
1944 } else {
1945 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
1946 if (ret < 0)
1947 ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
1948 }
1949
1950 if (ret < 0 && srpt_close_ch(ch))
1951 ret = 0;
1952
1953 return ret;
1954 }
1955
1956
1957 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
1958 {
1959 DECLARE_COMPLETION_ONSTACK(closed);
1960 struct srpt_port *sport = ch->sport;
1961
1962 pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
1963 ch->state);
1964
1965 ch->closed = &closed;
1966
1967 mutex_lock(&sport->mutex);
1968 srpt_disconnect_ch(ch);
1969 mutex_unlock(&sport->mutex);
1970
1971 while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
1972 pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
1973 ch->sess_name, ch->qp->qp_num, ch->state);
1974
1975 }
1976
1977 static void __srpt_close_all_ch(struct srpt_port *sport)
1978 {
1979 struct srpt_nexus *nexus;
1980 struct srpt_rdma_ch *ch;
1981
1982 lockdep_assert_held(&sport->mutex);
1983
1984 list_for_each_entry(nexus, &sport->nexus_list, entry) {
1985 list_for_each_entry(ch, &nexus->ch_list, list) {
1986 if (srpt_disconnect_ch(ch) >= 0)
1987 pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
1988 ch->sess_name, ch->qp->qp_num,
1989 dev_name(&sport->sdev->device->dev),
1990 sport->port);
1991 srpt_close_ch(ch);
1992 }
1993 }
1994 }
1995
1996
1997
1998
1999
2000 static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
2001 const u8 i_port_id[16],
2002 const u8 t_port_id[16])
2003 {
2004 struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
2005
2006 for (;;) {
2007 mutex_lock(&sport->mutex);
2008 list_for_each_entry(n, &sport->nexus_list, entry) {
2009 if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
2010 memcmp(n->t_port_id, t_port_id, 16) == 0) {
2011 nexus = n;
2012 break;
2013 }
2014 }
2015 if (!nexus && tmp_nexus) {
2016 list_add_tail_rcu(&tmp_nexus->entry,
2017 &sport->nexus_list);
2018 swap(nexus, tmp_nexus);
2019 }
2020 mutex_unlock(&sport->mutex);
2021
2022 if (nexus)
2023 break;
2024 tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2025 if (!tmp_nexus) {
2026 nexus = ERR_PTR(-ENOMEM);
2027 break;
2028 }
2029 INIT_LIST_HEAD(&tmp_nexus->ch_list);
2030 memcpy(tmp_nexus->i_port_id, i_port_id, 16);
2031 memcpy(tmp_nexus->t_port_id, t_port_id, 16);
2032 }
2033
2034 kfree(tmp_nexus);
2035
2036 return nexus;
2037 }
2038
2039 static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
2040 __must_hold(&sport->mutex)
2041 {
2042 lockdep_assert_held(&sport->mutex);
2043
2044 if (sport->enabled == enabled)
2045 return;
2046 sport->enabled = enabled;
2047 if (!enabled)
2048 __srpt_close_all_ch(sport);
2049 }
2050
2051 static void srpt_drop_sport_ref(struct srpt_port *sport)
2052 {
2053 if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
2054 complete(sport->freed_channels);
2055 }
2056
2057 static void srpt_free_ch(struct kref *kref)
2058 {
2059 struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
2060
2061 srpt_drop_sport_ref(ch->sport);
2062 kfree_rcu(ch, rcu);
2063 }
2064
2065
2066
2067
2068
2069
2070
2071
2072
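/**
 * srpt_release_channel_work - release channel resources
 * @w: Work item embedded in the channel to release.
 *
 * Stops the target session, destroys the CM id, the QP and the CQ, frees the
 * I/O context rings and drops the final channel reference.
 */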
2073 static void srpt_release_channel_work(struct work_struct *w)
2074 {
2075 struct srpt_rdma_ch *ch;
2076 struct srpt_device *sdev;
2077 struct srpt_port *sport;
2078 struct se_session *se_sess;
2079
2080 ch = container_of(w, struct srpt_rdma_ch, release_work);
2081 pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
2082
2083 sdev = ch->sport->sdev;
2084 BUG_ON(!sdev);
2085
2086 se_sess = ch->sess;
2087 BUG_ON(!se_sess);
2088
2089 target_stop_session(se_sess);
2090 target_wait_for_sess_cmds(se_sess);
2091
2092 target_remove_session(se_sess);
2093 ch->sess = NULL;
2094
2095 if (ch->using_rdma_cm)
2096 rdma_destroy_id(ch->rdma_cm.cm_id);
2097 else
2098 ib_destroy_cm_id(ch->ib_cm.cm_id);
2099
2100 sport = ch->sport;
2101 mutex_lock(&sport->mutex);
2102 list_del_rcu(&ch->list);
2103 mutex_unlock(&sport->mutex);
2104
2105 if (ch->closed)
2106 complete(ch->closed);
2107
2108 srpt_destroy_ch_ib(ch);
2109
2110 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2111 ch->sport->sdev, ch->rq_size,
2112 ch->rsp_buf_cache, DMA_TO_DEVICE);
2113
2114 kmem_cache_destroy(ch->rsp_buf_cache);
2115
2116 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2117 sdev, ch->rq_size,
2118 ch->req_buf_cache, DMA_FROM_DEVICE);
2119
2120 kmem_cache_destroy(ch->req_buf_cache);
2121
2122 kref_put(&ch->kref, srpt_free_ch);
2123 }
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
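/**
 * srpt_cm_req_recv - process an incoming SRP login request
 * @sdev: HCA through which the login request was received.
 * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
 * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
 * @port_num: Port through which the REQ message was received.
 * @pkey: P_Key of the incoming connection.
 * @req: SRP login request.
 * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
 *	the login request.
 *
 * Ownership of the cm_id is transferred to the channel if this function
 * returns zero.
 */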
2139 static int srpt_cm_req_recv(struct srpt_device *const sdev,
2140 struct ib_cm_id *ib_cm_id,
2141 struct rdma_cm_id *rdma_cm_id,
2142 u8 port_num, __be16 pkey,
2143 const struct srp_login_req *req,
2144 const char *src_addr)
2145 {
2146 struct srpt_port *sport = &sdev->port[port_num - 1];
2147 struct srpt_nexus *nexus;
2148 struct srp_login_rsp *rsp = NULL;
2149 struct srp_login_rej *rej = NULL;
2150 union {
2151 struct rdma_conn_param rdma_cm;
2152 struct ib_cm_rep_param ib_cm;
2153 } *rep_param = NULL;
2154 struct srpt_rdma_ch *ch = NULL;
2155 char i_port_id[36];
2156 u32 it_iu_len;
2157 int i, tag_num, tag_size, ret;
2158 struct srpt_tpg *stpg;
2159
2160 WARN_ON_ONCE(irqs_disabled());
2161
2162 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2163
2164 pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
2165 req->initiator_port_id, req->target_port_id, it_iu_len,
2166 port_num, &sport->gid, be16_to_cpu(pkey));
2167
2168 nexus = srpt_get_nexus(sport, req->initiator_port_id,
2169 req->target_port_id);
2170 if (IS_ERR(nexus)) {
2171 ret = PTR_ERR(nexus);
2172 goto out;
2173 }
2174
2175 ret = -ENOMEM;
2176 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2177 rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2178 rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
2179 if (!rsp || !rej || !rep_param)
2180 goto out;
2181
2182 ret = -EINVAL;
2183 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2184 rej->reason = cpu_to_be32(
2185 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2186 pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
2187 it_iu_len, 64, srp_max_req_size);
2188 goto reject;
2189 }
2190
2191 if (!sport->enabled) {
2192 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2193 pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
2194 dev_name(&sport->sdev->device->dev), port_num);
2195 goto reject;
2196 }
2197
2198 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2199 || *(__be64 *)(req->target_port_id + 8) !=
2200 cpu_to_be64(srpt_service_guid)) {
2201 rej->reason = cpu_to_be32(
2202 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2203 pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
2204 goto reject;
2205 }
2206
2207 ret = -ENOMEM;
2208 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2209 if (!ch) {
2210 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2211 pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2212 goto reject;
2213 }
2214
2215 kref_init(&ch->kref);
2216 ch->pkey = be16_to_cpu(pkey);
2217 ch->nexus = nexus;
2218 ch->zw_cqe.done = srpt_zerolength_write_done;
2219 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2220 ch->sport = sport;
2221 if (rdma_cm_id) {
2222 ch->using_rdma_cm = true;
2223 ch->rdma_cm.cm_id = rdma_cm_id;
2224 rdma_cm_id->context = ch;
2225 } else {
2226 ch->ib_cm.cm_id = ib_cm_id;
2227 ib_cm_id->context = ch;
2228 }
2229
2230
2231
2232
2233
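/* Limit the receive ring size to what the HCA supports (attrs.max_qp_wr). */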
2234 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2235 spin_lock_init(&ch->spinlock);
2236 ch->state = CH_CONNECTING;
2237 INIT_LIST_HEAD(&ch->cmd_wait_list);
2238 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2239
2240 ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
2241 512, 0, NULL);
2242 if (!ch->rsp_buf_cache)
2243 goto free_ch;
2244
2245 ch->ioctx_ring = (struct srpt_send_ioctx **)
2246 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2247 sizeof(*ch->ioctx_ring[0]),
2248 ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
2249 if (!ch->ioctx_ring) {
2250 pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
2251 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2252 goto free_rsp_cache;
2253 }
2254
2255 for (i = 0; i < ch->rq_size; i++)
2256 ch->ioctx_ring[i]->ch = ch;
2257 if (!sdev->use_srq) {
2258 u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
2259 be16_to_cpu(req->imm_data_offset) : 0;
2260 u16 alignment_offset;
2261 u32 req_sz;
2262
2263 if (req->req_flags & SRP_IMMED_REQUESTED)
2264 pr_debug("imm_data_offset = %d\n",
2265 be16_to_cpu(req->imm_data_offset));
2266 if (imm_data_offset >= sizeof(struct srp_cmd)) {
2267 ch->imm_data_offset = imm_data_offset;
2268 rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
2269 } else {
2270 ch->imm_data_offset = 0;
2271 }
2272 alignment_offset = round_up(imm_data_offset, 512) -
2273 imm_data_offset;
2274 req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
2275 ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
2276 512, 0, NULL);
2277 if (!ch->req_buf_cache)
2278 goto free_rsp_ring;
2279
2280 ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
2281 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2282 sizeof(*ch->ioctx_recv_ring[0]),
2283 ch->req_buf_cache,
2284 alignment_offset,
2285 DMA_FROM_DEVICE);
2286 if (!ch->ioctx_recv_ring) {
2287 pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
2288 rej->reason =
2289 cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2290 goto free_recv_cache;
2291 }
2292 for (i = 0; i < ch->rq_size; i++)
2293 INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
2294 }
2295
2296 ret = srpt_create_ch_ib(ch);
2297 if (ret) {
2298 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2299 pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
2300 goto free_recv_ring;
2301 }
2302
2303 strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
2304 snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
2305 be64_to_cpu(*(__be64 *)nexus->i_port_id),
2306 be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
2307
2308 pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
2309 i_port_id);
2310
2311 tag_num = ch->rq_size;
2312 tag_size = 1;
2313
2314 if (sport->guid_id) {
2315 mutex_lock(&sport->guid_id->mutex);
2316 list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
2317 if (!IS_ERR_OR_NULL(ch->sess))
2318 break;
2319 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2320 tag_size, TARGET_PROT_NORMAL,
2321 ch->sess_name, ch, NULL);
2322 }
2323 mutex_unlock(&sport->guid_id->mutex);
2324 }
2325
2326 if (sport->gid_id) {
2327 mutex_lock(&sport->gid_id->mutex);
2328 list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
2329 if (!IS_ERR_OR_NULL(ch->sess))
2330 break;
2331 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2332 tag_size, TARGET_PROT_NORMAL, i_port_id,
2333 ch, NULL);
2334 if (!IS_ERR_OR_NULL(ch->sess))
2335 break;
2336
2337 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2338 tag_size, TARGET_PROT_NORMAL,
2339 i_port_id + 2, ch, NULL);
2340 }
2341 mutex_unlock(&sport->gid_id->mutex);
2342 }
2343
2344 if (IS_ERR_OR_NULL(ch->sess)) {
2345 WARN_ON_ONCE(ch->sess == NULL);
2346 ret = PTR_ERR(ch->sess);
2347 ch->sess = NULL;
2348 pr_info("Rejected login for initiator %s: ret = %d.\n",
2349 ch->sess_name, ret);
2350 rej->reason = cpu_to_be32(ret == -ENOMEM ?
2351 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2352 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2353 goto destroy_ib;
2354 }
2355
2356
2357
2358
2359
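/*
 * Hold a port reference for the lifetime of the channel so that
 * srpt_release_sport() waits until this channel has been released.
 */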
2360 atomic_inc(&sport->refcount);
2361
2362 mutex_lock(&sport->mutex);
2363
2364 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2365 struct srpt_rdma_ch *ch2;
2366
2367 list_for_each_entry(ch2, &nexus->ch_list, list) {
2368 if (srpt_disconnect_ch(ch2) < 0)
2369 continue;
2370 pr_info("Relogin - closed existing channel %s\n",
2371 ch2->sess_name);
2372 rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2373 }
2374 } else {
2375 rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2376 }
2377
2378 list_add_tail_rcu(&ch->list, &nexus->ch_list);
2379
2380 if (!sport->enabled) {
2381 rej->reason = cpu_to_be32(
2382 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2383 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
2384 dev_name(&sdev->device->dev), port_num);
2385 mutex_unlock(&sport->mutex);
2386 ret = -EINVAL;
2387 goto reject;
2388 }
2389
2390 mutex_unlock(&sport->mutex);
2391
2392 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
2393 if (ret) {
2394 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2395 pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
2396 ret);
2397 goto reject;
2398 }
2399
2400 pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
2401 ch->sess_name, ch);
2402
2403
2404 rsp->opcode = SRP_LOGIN_RSP;
2405 rsp->tag = req->tag;
2406 rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
2407 rsp->max_ti_iu_len = req->req_it_iu_len;
2408 ch->max_ti_iu_len = it_iu_len;
2409 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2410 SRP_BUF_FORMAT_INDIRECT);
2411 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2412 atomic_set(&ch->req_lim, ch->rq_size);
2413 atomic_set(&ch->req_lim_delta, 0);
2414
2415
2416 if (ch->using_rdma_cm) {
2417 rep_param->rdma_cm.private_data = (void *)rsp;
2418 rep_param->rdma_cm.private_data_len = sizeof(*rsp);
2419 rep_param->rdma_cm.rnr_retry_count = 7;
2420 rep_param->rdma_cm.flow_control = 1;
2421 rep_param->rdma_cm.responder_resources = 4;
2422 rep_param->rdma_cm.initiator_depth = 4;
2423 } else {
2424 rep_param->ib_cm.qp_num = ch->qp->qp_num;
2425 rep_param->ib_cm.private_data = (void *)rsp;
2426 rep_param->ib_cm.private_data_len = sizeof(*rsp);
2427 rep_param->ib_cm.rnr_retry_count = 7;
2428 rep_param->ib_cm.flow_control = 1;
2429 rep_param->ib_cm.failover_accepted = 0;
2430 rep_param->ib_cm.srq = 1;
2431 rep_param->ib_cm.responder_resources = 4;
2432 rep_param->ib_cm.initiator_depth = 4;
2433 }
2434
2435
2436
2437
2438
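/*
 * Hold the sport mutex while accepting so that no connection can be
 * accepted after the target port has been disabled.
 */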
2439 mutex_lock(&sport->mutex);
2440 if (sport->enabled && ch->state == CH_CONNECTING) {
2441 if (ch->using_rdma_cm)
2442 ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
2443 else
2444 ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
2445 } else {
2446 ret = -EINVAL;
2447 }
2448 mutex_unlock(&sport->mutex);
2449
2450 switch (ret) {
2451 case 0:
2452 break;
2453 case -EINVAL:
2454 goto reject;
2455 default:
2456 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2457 pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
2458 ret);
2459 goto reject;
2460 }
2461
2462 goto out;
2463
2464 destroy_ib:
2465 srpt_destroy_ch_ib(ch);
2466
2467 free_recv_ring:
2468 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2469 ch->sport->sdev, ch->rq_size,
2470 ch->req_buf_cache, DMA_FROM_DEVICE);
2471
2472 free_recv_cache:
2473 kmem_cache_destroy(ch->req_buf_cache);
2474
2475 free_rsp_ring:
2476 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2477 ch->sport->sdev, ch->rq_size,
2478 ch->rsp_buf_cache, DMA_TO_DEVICE);
2479
2480 free_rsp_cache:
2481 kmem_cache_destroy(ch->rsp_buf_cache);
2482
2483 free_ch:
2484 if (rdma_cm_id)
2485 rdma_cm_id->context = NULL;
2486 else
2487 ib_cm_id->context = NULL;
2488 kfree(ch);
2489 ch = NULL;
2490
2491 WARN_ON_ONCE(ret == 0);
2492
2493 reject:
2494 pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
2495 rej->opcode = SRP_LOGIN_REJ;
2496 rej->tag = req->tag;
2497 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2498 SRP_BUF_FORMAT_INDIRECT);
2499
2500 if (rdma_cm_id)
2501 rdma_reject(rdma_cm_id, rej, sizeof(*rej),
2502 IB_CM_REJ_CONSUMER_DEFINED);
2503 else
2504 ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2505 rej, sizeof(*rej));
2506
2507 if (ch && ch->sess) {
2508 srpt_close_ch(ch);
2509
2510
2511
2512
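/*
 * The channel release triggered by srpt_close_ch() owns the CM ID from
 * here on; report success so that the caller does not destroy it.
 */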
2513 ret = 0;
2514 }
2515
2516 out:
2517 kfree(rep_param);
2518 kfree(rsp);
2519 kfree(rej);
2520
2521 return ret;
2522 }
2523
2524 static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
2525 const struct ib_cm_req_event_param *param,
2526 void *private_data)
2527 {
2528 char sguid[40];
2529
2530 srpt_format_guid(sguid, sizeof(sguid),
2531 &param->primary_path->dgid.global.interface_id);
2532
2533 return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
2534 param->primary_path->pkey,
2535 private_data, sguid);
2536 }
2537
2538 static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
2539 struct rdma_cm_event *event)
2540 {
2541 struct srpt_device *sdev;
2542 struct srp_login_req req;
2543 const struct srp_login_req_rdma *req_rdma;
2544 struct sa_path_rec *path_rec = cm_id->route.path_rec;
2545 char src_addr[40];
2546
2547 sdev = ib_get_client_data(cm_id->device, &srpt_client);
2548 if (!sdev)
2549 return -ECONNREFUSED;
2550
2551 if (event->param.conn.private_data_len < sizeof(*req_rdma))
2552 return -EINVAL;
2553
2554
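/* Convert the RDMA/CM login request into an SRP login request. */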
2555 req_rdma = event->param.conn.private_data;
2556 memset(&req, 0, sizeof(req));
2557 req.opcode = req_rdma->opcode;
2558 req.tag = req_rdma->tag;
2559 req.req_it_iu_len = req_rdma->req_it_iu_len;
2560 req.req_buf_fmt = req_rdma->req_buf_fmt;
2561 req.req_flags = req_rdma->req_flags;
2562 memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
2563 memcpy(req.target_port_id, req_rdma->target_port_id, 16);
2564 req.imm_data_offset = req_rdma->imm_data_offset;
2565
2566 snprintf(src_addr, sizeof(src_addr), "%pIS",
2567 &cm_id->route.addr.src_addr);
2568
2569 return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
2570 path_rec ? path_rec->pkey : 0, &req, src_addr);
2571 }
2572
2573 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2574 enum ib_cm_rej_reason reason,
2575 const u8 *private_data,
2576 u8 private_data_len)
2577 {
2578 char *priv = NULL;
2579 int i;
2580
2581 if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2582 GFP_KERNEL))) {
2583 for (i = 0; i < private_data_len; i++)
2584 sprintf(priv + 3 * i, " %02x", private_data[i]);
2585 }
2586 pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2587 ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2588 "; private data" : "", priv ? priv : " (?)");
2589 kfree(priv);
2590 }
2591
2592
2593
2594
2595
2596
2597
2598
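/*
 * Process an IB_CM_RTU_RECEIVED or RDMA_CM_EVENT_ESTABLISHED event: move
 * the QP into the RTS state (IB/CM only), mark the channel live and kick
 * off processing of the command wait list via a zero-length RDMA write.
 */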
2599 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2600 {
2601 int ret;
2602
2603 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
2604 if (ret < 0) {
2605 pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
2606 ch->qp->qp_num);
2607 srpt_close_ch(ch);
2608 return;
2609 }
2610
2611
2612
2613
2614
2615
2616 if (!srpt_set_ch_state(ch, CH_LIVE)) {
2617 pr_err("%s-%d: channel transition to LIVE state failed\n",
2618 ch->sess_name, ch->qp->qp_num);
2619 return;
2620 }
2621
2622
2623 ret = srpt_zerolength_write(ch);
2624 WARN_ONCE(ret < 0, "%d\n", ret);
2625 }
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
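/*
 * IB/CM callback. A non-zero return value causes the IB/CM core to
 * destroy the CM ID, so only return non-zero if this driver no longer
 * references that CM ID.
 */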
2639 static int srpt_cm_handler(struct ib_cm_id *cm_id,
2640 const struct ib_cm_event *event)
2641 {
2642 struct srpt_rdma_ch *ch = cm_id->context;
2643 int ret;
2644
2645 ret = 0;
2646 switch (event->event) {
2647 case IB_CM_REQ_RECEIVED:
2648 ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
2649 event->private_data);
2650 break;
2651 case IB_CM_REJ_RECEIVED:
2652 srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2653 event->private_data,
2654 IB_CM_REJ_PRIVATE_DATA_SIZE);
2655 break;
2656 case IB_CM_RTU_RECEIVED:
2657 case IB_CM_USER_ESTABLISHED:
2658 srpt_cm_rtu_recv(ch);
2659 break;
2660 case IB_CM_DREQ_RECEIVED:
2661 srpt_disconnect_ch(ch);
2662 break;
2663 case IB_CM_DREP_RECEIVED:
2664 pr_info("Received CM DREP message for ch %s-%d.\n",
2665 ch->sess_name, ch->qp->qp_num);
2666 srpt_close_ch(ch);
2667 break;
2668 case IB_CM_TIMEWAIT_EXIT:
2669 pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2670 ch->sess_name, ch->qp->qp_num);
2671 srpt_close_ch(ch);
2672 break;
2673 case IB_CM_REP_ERROR:
2674 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2675 ch->qp->qp_num);
2676 break;
2677 case IB_CM_DREQ_ERROR:
2678 pr_info("Received CM DREQ ERROR event.\n");
2679 break;
2680 case IB_CM_MRA_RECEIVED:
2681 pr_info("Received CM MRA event\n");
2682 break;
2683 default:
2684 pr_err("received unrecognized CM event %d\n", event->event);
2685 break;
2686 }
2687
2688 return ret;
2689 }
2690
2691 static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
2692 struct rdma_cm_event *event)
2693 {
2694 struct srpt_rdma_ch *ch = cm_id->context;
2695 int ret = 0;
2696
2697 switch (event->event) {
2698 case RDMA_CM_EVENT_CONNECT_REQUEST:
2699 ret = srpt_rdma_cm_req_recv(cm_id, event);
2700 break;
2701 case RDMA_CM_EVENT_REJECTED:
2702 srpt_cm_rej_recv(ch, event->status,
2703 event->param.conn.private_data,
2704 event->param.conn.private_data_len);
2705 break;
2706 case RDMA_CM_EVENT_ESTABLISHED:
2707 srpt_cm_rtu_recv(ch);
2708 break;
2709 case RDMA_CM_EVENT_DISCONNECTED:
2710 if (ch->state < CH_DISCONNECTING)
2711 srpt_disconnect_ch(ch);
2712 else
2713 srpt_close_ch(ch);
2714 break;
2715 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2716 srpt_close_ch(ch);
2717 break;
2718 case RDMA_CM_EVENT_UNREACHABLE:
2719 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2720 ch->qp->qp_num);
2721 break;
2722 case RDMA_CM_EVENT_DEVICE_REMOVAL:
2723 case RDMA_CM_EVENT_ADDR_CHANGE:
2724 break;
2725 default:
2726 pr_err("received unrecognized RDMA CM event %d\n",
2727 event->event);
2728 break;
2729 }
2730
2731 return ret;
2732 }
2733
2734
2735
2736
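/*
 * Start the data-out transfer for a SCSI WRITE: execute the command
 * immediately if the data was received as immediate data, otherwise post
 * RDMA READ work requests that pull the data from the initiator.
 */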
2737 static int srpt_write_pending(struct se_cmd *se_cmd)
2738 {
2739 struct srpt_send_ioctx *ioctx =
2740 container_of(se_cmd, struct srpt_send_ioctx, cmd);
2741 struct srpt_rdma_ch *ch = ioctx->ch;
2742 struct ib_send_wr *first_wr = NULL;
2743 struct ib_cqe *cqe = &ioctx->rdma_cqe;
2744 enum srpt_command_state new_state;
2745 int ret, i;
2746
2747 if (ioctx->recv_ioctx) {
2748 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2749 target_execute_cmd(&ioctx->cmd);
2750 return 0;
2751 }
2752
2753 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2754 WARN_ON(new_state == SRPT_STATE_DONE);
2755
2756 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2757 pr_warn("%s: IB send queue full (needed %d)\n",
2758 __func__, ioctx->n_rdma);
2759 ret = -ENOMEM;
2760 goto out_undo;
2761 }
2762
2763 cqe->done = srpt_rdma_read_done;
2764 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2765 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2766
2767 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2768 cqe, first_wr);
2769 cqe = NULL;
2770 }
2771
2772 ret = ib_post_send(ch->qp, first_wr, NULL);
2773 if (ret) {
2774 pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
2775 __func__, ret, ioctx->n_rdma,
2776 atomic_read(&ch->sq_wr_avail));
2777 goto out_undo;
2778 }
2779
2780 return 0;
2781 out_undo:
2782 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2783 return ret;
2784 }
2785
2786 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2787 {
2788 switch (tcm_mgmt_status) {
2789 case TMR_FUNCTION_COMPLETE:
2790 return SRP_TSK_MGMT_SUCCESS;
2791 case TMR_FUNCTION_REJECTED:
2792 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2793 }
2794 return SRP_TSK_MGMT_FAILED;
2795 }
2796
2797
2798
2799
2800
2801
2802
2803
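/*
 * Send an SRP_RSP response. For a SCSI READ, the RDMA WRITE work requests
 * that transfer the data to the initiator are chained in front of the
 * response send work request.
 */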
2804 static void srpt_queue_response(struct se_cmd *cmd)
2805 {
2806 struct srpt_send_ioctx *ioctx =
2807 container_of(cmd, struct srpt_send_ioctx, cmd);
2808 struct srpt_rdma_ch *ch = ioctx->ch;
2809 struct srpt_device *sdev = ch->sport->sdev;
2810 struct ib_send_wr send_wr, *first_wr = &send_wr;
2811 struct ib_sge sge;
2812 enum srpt_command_state state;
2813 int resp_len, ret, i;
2814 u8 srp_tm_status;
2815
2816 state = ioctx->state;
2817 switch (state) {
2818 case SRPT_STATE_NEW:
2819 case SRPT_STATE_DATA_IN:
2820 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2821 break;
2822 case SRPT_STATE_MGMT:
2823 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2824 break;
2825 default:
2826 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2827 ch, ioctx->ioctx.index, ioctx->state);
2828 break;
2829 }
2830
2831 if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
2832 return;
2833
2834
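/* For read commands, transfer the data to the initiator. */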
2835 if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2836 ioctx->cmd.data_length &&
2837 !ioctx->queue_status_only) {
2838 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2839 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2840
2841 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2842 ch->sport->port, NULL, first_wr);
2843 }
2844 }
2845
2846 if (state != SRPT_STATE_MGMT)
2847 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2848 cmd->scsi_status);
2849 else {
2850 srp_tm_status
2851 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2852 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2853 ioctx->cmd.tag);
2854 }
2855
2856 atomic_inc(&ch->req_lim);
2857
2858 if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2859 &ch->sq_wr_avail) < 0)) {
2860 pr_warn("%s: IB send queue full (needed %d)\n",
2861 __func__, ioctx->n_rdma);
2862 goto out;
2863 }
2864
2865 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2866 DMA_TO_DEVICE);
2867
2868 sge.addr = ioctx->ioctx.dma;
2869 sge.length = resp_len;
2870 sge.lkey = sdev->lkey;
2871
2872 ioctx->ioctx.cqe.done = srpt_send_done;
2873 send_wr.next = NULL;
2874 send_wr.wr_cqe = &ioctx->ioctx.cqe;
2875 send_wr.sg_list = &sge;
2876 send_wr.num_sge = 1;
2877 send_wr.opcode = IB_WR_SEND;
2878 send_wr.send_flags = IB_SEND_SIGNALED;
2879
2880 ret = ib_post_send(ch->qp, first_wr, NULL);
2881 if (ret < 0) {
2882 pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
2883 __func__, ioctx->cmd.tag, ret);
2884 goto out;
2885 }
2886
2887 return;
2888
2889 out:
2890 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2891 atomic_dec(&ch->req_lim);
2892 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2893 target_put_sess_cmd(&ioctx->cmd);
2894 }
2895
2896 static int srpt_queue_data_in(struct se_cmd *cmd)
2897 {
2898 srpt_queue_response(cmd);
2899 return 0;
2900 }
2901
2902 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2903 {
2904 srpt_queue_response(cmd);
2905 }
2906
2907
2908
2909
2910
2911
2912
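/*
 * Called for commands that are aborted without sending a response; return
 * the credit held by such a command to the initiator via req_lim_delta.
 */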
2913 static void srpt_aborted_task(struct se_cmd *cmd)
2914 {
2915 struct srpt_send_ioctx *ioctx = container_of(cmd,
2916 struct srpt_send_ioctx, cmd);
2917 struct srpt_rdma_ch *ch = ioctx->ch;
2918
2919 atomic_inc(&ch->req_lim_delta);
2920 }
2921
2922 static int srpt_queue_status(struct se_cmd *cmd)
2923 {
2924 struct srpt_send_ioctx *ioctx;
2925
2926 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2927 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2928 if (cmd->se_cmd_flags &
2929 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2930 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2931 ioctx->queue_status_only = true;
2932 srpt_queue_response(cmd);
2933 return 0;
2934 }
2935
2936 static void srpt_refresh_port_work(struct work_struct *work)
2937 {
2938 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2939
2940 srpt_refresh_port(sport);
2941 }
2942
2943
2944
2945
2946
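/*
 * Disable a target port and wait until all channels associated with it
 * have been released before freeing its I_T nexus list.
 */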
2947 static int srpt_release_sport(struct srpt_port *sport)
2948 {
2949 DECLARE_COMPLETION_ONSTACK(c);
2950 struct srpt_nexus *nexus, *next_n;
2951 struct srpt_rdma_ch *ch;
2952
2953 WARN_ON_ONCE(irqs_disabled());
2954
2955 sport->freed_channels = &c;
2956
2957 mutex_lock(&sport->mutex);
2958 srpt_set_enabled(sport, false);
2959 mutex_unlock(&sport->mutex);
2960
2961 while (atomic_read(&sport->refcount) > 0 &&
2962 wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
2963 pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
2964 dev_name(&sport->sdev->device->dev), sport->port,
2965 atomic_read(&sport->refcount));
2966 rcu_read_lock();
2967 list_for_each_entry(nexus, &sport->nexus_list, entry) {
2968 list_for_each_entry(ch, &nexus->ch_list, list) {
2969 pr_info("%s-%d: state %s\n",
2970 ch->sess_name, ch->qp->qp_num,
2971 get_ch_state_name(ch->state));
2972 }
2973 }
2974 rcu_read_unlock();
2975 }
2976
2977 mutex_lock(&sport->mutex);
2978 list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2979 list_del(&nexus->entry);
2980 kfree_rcu(nexus, rcu);
2981 }
2982 mutex_unlock(&sport->mutex);
2983
2984 return 0;
2985 }
2986
2987 struct port_and_port_id {
2988 struct srpt_port *sport;
2989 struct srpt_port_id **port_id;
2990 };
2991
2992 static struct port_and_port_id __srpt_lookup_port(const char *name)
2993 {
2994 struct ib_device *dev;
2995 struct srpt_device *sdev;
2996 struct srpt_port *sport;
2997 int i;
2998
2999 list_for_each_entry(sdev, &srpt_dev_list, list) {
3000 dev = sdev->device;
3001 if (!dev)
3002 continue;
3003
3004 for (i = 0; i < dev->phys_port_cnt; i++) {
3005 sport = &sdev->port[i];
3006
3007 if (strcmp(sport->guid_name, name) == 0) {
3008 kref_get(&sdev->refcnt);
3009 return (struct port_and_port_id){
3010 sport, &sport->guid_id};
3011 }
3012 if (strcmp(sport->gid_name, name) == 0) {
3013 kref_get(&sdev->refcnt);
3014 return (struct port_and_port_id){
3015 sport, &sport->gid_id};
3016 }
3017 }
3018 }
3019
3020 return (struct port_and_port_id){};
3021 }
3022
3023
3024
3025
3026
3027
3028
3029
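/*
 * Look up an RDMA port by its port GUID or GID name. On success a
 * reference has been taken on the owning srpt_device; the caller must
 * drop it with srpt_sdev_put().
 */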
3030 static struct port_and_port_id srpt_lookup_port(const char *name)
3031 {
3032 struct port_and_port_id papi;
3033
3034 spin_lock(&srpt_dev_lock);
3035 papi = __srpt_lookup_port(name);
3036 spin_unlock(&srpt_dev_lock);
3037
3038 return papi;
3039 }
3040
3041 static void srpt_free_srq(struct srpt_device *sdev)
3042 {
3043 if (!sdev->srq)
3044 return;
3045
3046 ib_destroy_srq(sdev->srq);
3047 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3048 sdev->srq_size, sdev->req_buf_cache,
3049 DMA_FROM_DEVICE);
3050 kmem_cache_destroy(sdev->req_buf_cache);
3051 sdev->srq = NULL;
3052 }
3053
3054 static int srpt_alloc_srq(struct srpt_device *sdev)
3055 {
3056 struct ib_srq_init_attr srq_attr = {
3057 .event_handler = srpt_srq_event,
3058 .srq_context = (void *)sdev,
3059 .attr.max_wr = sdev->srq_size,
3060 .attr.max_sge = 1,
3061 .srq_type = IB_SRQT_BASIC,
3062 };
3063 struct ib_device *device = sdev->device;
3064 struct ib_srq *srq;
3065 int i;
3066
3067 WARN_ON_ONCE(sdev->srq);
3068 srq = ib_create_srq(sdev->pd, &srq_attr);
3069 if (IS_ERR(srq)) {
3070 pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
3071 return PTR_ERR(srq);
3072 }
3073
3074 pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
3075 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
3076
3077 sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
3078 srp_max_req_size, 0, 0, NULL);
3079 if (!sdev->req_buf_cache)
3080 goto free_srq;
3081
3082 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3083 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3084 sizeof(*sdev->ioctx_ring[0]),
3085 sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
3086 if (!sdev->ioctx_ring)
3087 goto free_cache;
3088
3089 sdev->use_srq = true;
3090 sdev->srq = srq;
3091
3092 for (i = 0; i < sdev->srq_size; ++i) {
3093 INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
3094 srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
3095 }
3096
3097 return 0;
3098
3099 free_cache:
3100 kmem_cache_destroy(sdev->req_buf_cache);
3101
3102 free_srq:
3103 ib_destroy_srq(srq);
3104 return -ENOMEM;
3105 }
3106
3107 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
3108 {
3109 struct ib_device *device = sdev->device;
3110 int ret = 0;
3111
3112 if (!use_srq) {
3113 srpt_free_srq(sdev);
3114 sdev->use_srq = false;
3115 } else if (use_srq && !sdev->srq) {
3116 ret = srpt_alloc_srq(sdev);
3117 }
3118 pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
3119 dev_name(&device->dev), sdev->use_srq, ret);
3120 return ret;
3121 }
3122
3123 static void srpt_free_sdev(struct kref *refcnt)
3124 {
3125 struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
3126
3127 kfree(sdev);
3128 }
3129
3130 static void srpt_sdev_put(struct srpt_device *sdev)
3131 {
3132 kref_put(&sdev->refcnt, srpt_free_sdev);
3133 }
3134
3135
3136
3137
3138
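/*
 * IB client .add() callback: allocate a srpt_device, set up the optional
 * SRQ, listen for IB/CM login requests, register for asynchronous events
 * and initialize one srpt_port per physical HCA port.
 */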
3139 static int srpt_add_one(struct ib_device *device)
3140 {
3141 struct srpt_device *sdev;
3142 struct srpt_port *sport;
3143 int ret;
3144 u32 i;
3145
3146 pr_debug("device = %p\n", device);
3147
3148 sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
3149 GFP_KERNEL);
3150 if (!sdev)
3151 return -ENOMEM;
3152
3153 kref_init(&sdev->refcnt);
3154 sdev->device = device;
3155 mutex_init(&sdev->sdev_mutex);
3156
3157 sdev->pd = ib_alloc_pd(device, 0);
3158 if (IS_ERR(sdev->pd)) {
3159 ret = PTR_ERR(sdev->pd);
3160 goto free_dev;
3161 }
3162
3163 sdev->lkey = sdev->pd->local_dma_lkey;
3164
3165 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3166
3167 srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
3168
3169 if (!srpt_service_guid)
3170 srpt_service_guid = be64_to_cpu(device->node_guid);
3171
3172 if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
3173 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3174 if (IS_ERR(sdev->cm_id)) {
3175 pr_info("ib_create_cm_id() failed: %ld\n",
3176 PTR_ERR(sdev->cm_id));
3177 ret = PTR_ERR(sdev->cm_id);
3178 sdev->cm_id = NULL;
3179 if (!rdma_cm_id)
3180 goto err_ring;
3181 }
3182
3183
3184 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
3185 srpt_service_guid, srpt_service_guid, srpt_service_guid);
3186
3187
3188
3189
3190
3191
3192
3193 ret = sdev->cm_id ?
3194 ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
3195 0;
3196 if (ret < 0) {
3197 pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
3198 sdev->cm_id->state);
3199 goto err_cm;
3200 }
3201
3202 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3203 srpt_event_handler);
3204 ib_register_event_handler(&sdev->event_handler);
3205
3206 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3207 sport = &sdev->port[i - 1];
3208 INIT_LIST_HEAD(&sport->nexus_list);
3209 mutex_init(&sport->mutex);
3210 sport->sdev = sdev;
3211 sport->port = i;
3212 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3213 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3214 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3215 sport->port_attrib.use_srq = false;
3216 INIT_WORK(&sport->work, srpt_refresh_port_work);
3217
3218 ret = srpt_refresh_port(sport);
3219 if (ret) {
3220 pr_err("MAD registration failed for %s-%d.\n",
3221 dev_name(&sdev->device->dev), i);
3222 i--;
3223 goto err_port;
3224 }
3225 }
3226
3227 spin_lock(&srpt_dev_lock);
3228 list_add_tail(&sdev->list, &srpt_dev_list);
3229 spin_unlock(&srpt_dev_lock);
3230
3231 ib_set_client_data(device, &srpt_client, sdev);
3232 pr_debug("added %s.\n", dev_name(&device->dev));
3233 return 0;
3234
3235 err_port:
3236 srpt_unregister_mad_agent(sdev, i);
3237 ib_unregister_event_handler(&sdev->event_handler);
3238 err_cm:
3239 if (sdev->cm_id)
3240 ib_destroy_cm_id(sdev->cm_id);
3241 err_ring:
3242 srpt_free_srq(sdev);
3243 ib_dealloc_pd(sdev->pd);
3244 free_dev:
3245 srpt_sdev_put(sdev);
3246 pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
3247 return ret;
3248 }
3249
3250
3251
3252
3253
3254
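/*
 * IB client .remove() callback: undo everything srpt_add_one() did and
 * wait until all sessions on this device have been released.
 */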
3255 static void srpt_remove_one(struct ib_device *device, void *client_data)
3256 {
3257 struct srpt_device *sdev = client_data;
3258 int i;
3259
3260 srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
3261
3262 ib_unregister_event_handler(&sdev->event_handler);
3263
3264
3265 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3266 cancel_work_sync(&sdev->port[i].work);
3267
3268 if (sdev->cm_id)
3269 ib_destroy_cm_id(sdev->cm_id);
3270
3271 ib_set_client_data(device, &srpt_client, NULL);
3272
3273
3274
3275
3276
3277
3278 spin_lock(&srpt_dev_lock);
3279 list_del(&sdev->list);
3280 spin_unlock(&srpt_dev_lock);
3281
3282 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3283 srpt_release_sport(&sdev->port[i]);
3284
3285 srpt_free_srq(sdev);
3286
3287 ib_dealloc_pd(sdev->pd);
3288
3289 srpt_sdev_put(sdev);
3290 }
3291
3292 static struct ib_client srpt_client = {
3293 .name = DRV_NAME,
3294 .add = srpt_add_one,
3295 .remove = srpt_remove_one
3296 };
3297
3298 static int srpt_check_true(struct se_portal_group *se_tpg)
3299 {
3300 return 1;
3301 }
3302
3303 static int srpt_check_false(struct se_portal_group *se_tpg)
3304 {
3305 return 0;
3306 }
3307
3308 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
3309 {
3310 return tpg->se_tpg_wwn->priv;
3311 }
3312
3313 static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
3314 {
3315 struct srpt_port *sport = wwn->priv;
3316
3317 if (sport->guid_id && &sport->guid_id->wwn == wwn)
3318 return sport->guid_id;
3319 if (sport->gid_id && &sport->gid_id->wwn == wwn)
3320 return sport->gid_id;
3321 WARN_ON_ONCE(true);
3322 return NULL;
3323 }
3324
3325 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3326 {
3327 struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
3328
3329 return stpg->sport_id->name;
3330 }
3331
3332 static u16 srpt_get_tag(struct se_portal_group *tpg)
3333 {
3334 return 1;
3335 }
3336
3337 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3338 {
3339 return 1;
3340 }
3341
3342 static void srpt_release_cmd(struct se_cmd *se_cmd)
3343 {
3344 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3345 struct srpt_send_ioctx, cmd);
3346 struct srpt_rdma_ch *ch = ioctx->ch;
3347 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
3348
3349 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3350 !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3351
3352 if (recv_ioctx) {
3353 WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
3354 ioctx->recv_ioctx = NULL;
3355 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3356 }
3357
3358 if (ioctx->n_rw_ctx) {
3359 srpt_free_rw_ctxs(ch, ioctx);
3360 ioctx->n_rw_ctx = 0;
3361 }
3362
3363 target_free_tag(se_cmd->se_sess, se_cmd);
3364 }
3365
3366
3367
3368
3369
3370
3371
3372
3373
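/*
 * Forcibly close a session: disconnect its RDMA channel and wait for the
 * disconnect to complete.
 */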
3374 static void srpt_close_session(struct se_session *se_sess)
3375 {
3376 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
3377
3378 srpt_disconnect_ch_sync(ch);
3379 }
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390 static u32 srpt_sess_get_index(struct se_session *se_sess)
3391 {
3392 return 0;
3393 }
3394
3395 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3396 {
3397 }
3398
3399
3400 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3401 {
3402 struct srpt_send_ioctx *ioctx;
3403
3404 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3405 return ioctx->state;
3406 }
3407
3408 static int srpt_parse_guid(u64 *guid, const char *name)
3409 {
3410 u16 w[4];
3411 int ret = -EINVAL;
3412
3413 if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
3414 goto out;
3415 *guid = get_unaligned_be64(w);
3416 ret = 0;
3417 out:
3418 return ret;
3419 }
3420
3421
3422
3423
3424
3425
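/*
 * Parse an ASCII initiator port ID: an optional "0x" prefix followed by
 * an even number of hex digits. The result is stored right-aligned (left
 * padded with zeroes) in the 16-byte array i_port_id.
 */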
3426 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3427 {
3428 const char *p;
3429 unsigned len, count, leading_zero_bytes;
3430 int ret;
3431
3432 p = name;
3433 if (strncasecmp(p, "0x", 2) == 0)
3434 p += 2;
3435 ret = -EINVAL;
3436 len = strlen(p);
3437 if (len % 2)
3438 goto out;
3439 count = min(len / 2, 16U);
3440 leading_zero_bytes = 16 - count;
3441 memset(i_port_id, 0, leading_zero_bytes);
3442 ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
3443
3444 out:
3445 return ret;
3446 }
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
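/*
 * configfs callback invoked when an ACL is created. The initiator name
 * must be a GUID, a 128-bit initiator port ID or an IP address.
 */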
3460 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
3461 {
3462 struct sockaddr_storage sa;
3463 u64 guid;
3464 u8 i_port_id[16];
3465 int ret;
3466
3467 ret = srpt_parse_guid(&guid, name);
3468 if (ret < 0)
3469 ret = srpt_parse_i_port_id(i_port_id, name);
3470 if (ret < 0)
3471 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
3472 &sa);
3473 if (ret < 0)
3474 pr_err("invalid initiator port ID %s\n", name);
3475 return ret;
3476 }
3477
3478 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3479 char *page)
3480 {
3481 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3482 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3483
3484 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3485 }
3486
3487 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3488 const char *page, size_t count)
3489 {
3490 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3491 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3492 unsigned long val;
3493 int ret;
3494
3495 ret = kstrtoul(page, 0, &val);
3496 if (ret < 0) {
3497 pr_err("kstrtoul() failed with ret: %d\n", ret);
3498 return -EINVAL;
3499 }
3500 if (val > MAX_SRPT_RDMA_SIZE) {
3501 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3502 MAX_SRPT_RDMA_SIZE);
3503 return -EINVAL;
3504 }
3505 if (val < DEFAULT_MAX_RDMA_SIZE) {
3506 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3507 val, DEFAULT_MAX_RDMA_SIZE);
3508 return -EINVAL;
3509 }
3510 sport->port_attrib.srp_max_rdma_size = val;
3511
3512 return count;
3513 }
3514
3515 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3516 char *page)
3517 {
3518 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3519 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3520
3521 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3522 }
3523
3524 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3525 const char *page, size_t count)
3526 {
3527 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3528 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3529 unsigned long val;
3530 int ret;
3531
3532 ret = kstrtoul(page, 0, &val);
3533 if (ret < 0) {
3534 pr_err("kstrtoul() failed with ret: %d\n", ret);
3535 return -EINVAL;
3536 }
3537 if (val > MAX_SRPT_RSP_SIZE) {
3538 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3539 MAX_SRPT_RSP_SIZE);
3540 return -EINVAL;
3541 }
3542 if (val < MIN_MAX_RSP_SIZE) {
3543 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3544 MIN_MAX_RSP_SIZE);
3545 return -EINVAL;
3546 }
3547 sport->port_attrib.srp_max_rsp_size = val;
3548
3549 return count;
3550 }
3551
3552 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3553 char *page)
3554 {
3555 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3556 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3557
3558 return sysfs_emit(page, "%u\n", sport->port_attrib.srp_sq_size);
3559 }
3560
3561 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3562 const char *page, size_t count)
3563 {
3564 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3565 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3566 unsigned long val;
3567 int ret;
3568
3569 ret = kstrtoul(page, 0, &val);
3570 if (ret < 0) {
3571 pr_err("kstrtoul() failed with ret: %d\n", ret);
3572 return -EINVAL;
3573 }
3574 if (val > MAX_SRPT_SRQ_SIZE) {
3575 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3576 MAX_SRPT_SRQ_SIZE);
3577 return -EINVAL;
3578 }
3579 if (val < MIN_SRPT_SRQ_SIZE) {
3580 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3581 MIN_SRPT_SRQ_SIZE);
3582 return -EINVAL;
3583 }
3584 sport->port_attrib.srp_sq_size = val;
3585
3586 return count;
3587 }
3588
3589 static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
3590 char *page)
3591 {
3592 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3593 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3594
3595 return sysfs_emit(page, "%d\n", sport->port_attrib.use_srq);
3596 }
3597
3598 static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
3599 const char *page, size_t count)
3600 {
3601 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3602 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3603 struct srpt_device *sdev = sport->sdev;
3604 unsigned long val;
3605 bool enabled;
3606 int ret;
3607
3608 ret = kstrtoul(page, 0, &val);
3609 if (ret < 0)
3610 return ret;
3611 if (val != !!val)
3612 return -EINVAL;
3613
3614 ret = mutex_lock_interruptible(&sdev->sdev_mutex);
3615 if (ret < 0)
3616 return ret;
3617 ret = mutex_lock_interruptible(&sport->mutex);
3618 if (ret < 0)
3619 goto unlock_sdev;
3620 enabled = sport->enabled;
3621
3622 srpt_set_enabled(sport, false);
3623 sport->port_attrib.use_srq = val;
3624 srpt_use_srq(sdev, sport->port_attrib.use_srq);
3625 srpt_set_enabled(sport, enabled);
3626 ret = count;
3627 mutex_unlock(&sport->mutex);
3628 unlock_sdev:
3629 mutex_unlock(&sdev->sdev_mutex);
3630
3631 return ret;
3632 }
3633
3634 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
3635 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
3636 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
3637 CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
3638
3639 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3640 &srpt_tpg_attrib_attr_srp_max_rdma_size,
3641 &srpt_tpg_attrib_attr_srp_max_rsp_size,
3642 &srpt_tpg_attrib_attr_srp_sq_size,
3643 &srpt_tpg_attrib_attr_use_srq,
3644 NULL,
3645 };
3646
3647 static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
3648 {
3649 struct rdma_cm_id *rdma_cm_id;
3650 int ret;
3651
3652 rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
3653 NULL, RDMA_PS_TCP, IB_QPT_RC);
3654 if (IS_ERR(rdma_cm_id)) {
3655 pr_err("RDMA/CM ID creation failed: %ld\n",
3656 PTR_ERR(rdma_cm_id));
3657 goto out;
3658 }
3659
3660 ret = rdma_bind_addr(rdma_cm_id, listen_addr);
3661 if (ret) {
3662 char addr_str[64];
3663
3664 snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
3665 pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
3666 addr_str, ret);
3667 rdma_destroy_id(rdma_cm_id);
3668 rdma_cm_id = ERR_PTR(ret);
3669 goto out;
3670 }
3671
3672 ret = rdma_listen(rdma_cm_id, 128);
3673 if (ret) {
3674 pr_err("rdma_listen() failed: %d\n", ret);
3675 rdma_destroy_id(rdma_cm_id);
3676 rdma_cm_id = ERR_PTR(ret);
3677 }
3678
3679 out:
3680 return rdma_cm_id;
3681 }
3682
3683 static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
3684 {
3685 return sysfs_emit(page, "%d\n", rdma_cm_port);
3686 }
3687
3688 static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
3689 const char *page, size_t count)
3690 {
3691 struct sockaddr_in addr4 = { .sin_family = AF_INET };
3692 struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
3693 struct rdma_cm_id *new_id = NULL;
3694 u16 val;
3695 int ret;
3696
3697 ret = kstrtou16(page, 0, &val);
3698 if (ret < 0)
3699 return ret;
3700 ret = count;
3701 if (rdma_cm_port == val)
3702 goto out;
3703
3704 if (val) {
3705 addr6.sin6_port = cpu_to_be16(val);
3706 new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
3707 if (IS_ERR(new_id)) {
3708 addr4.sin_port = cpu_to_be16(val);
3709 new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
3710 if (IS_ERR(new_id)) {
3711 ret = PTR_ERR(new_id);
3712 goto out;
3713 }
3714 }
3715 }
3716
3717 mutex_lock(&rdma_cm_mutex);
3718 rdma_cm_port = val;
3719 swap(rdma_cm_id, new_id);
3720 mutex_unlock(&rdma_cm_mutex);
3721
3722 if (new_id)
3723 rdma_destroy_id(new_id);
3724 ret = count;
3725 out:
3726 return ret;
3727 }
3728
3729 CONFIGFS_ATTR(srpt_, rdma_cm_port);
3730
3731 static struct configfs_attribute *srpt_da_attrs[] = {
3732 &srpt_attr_rdma_cm_port,
3733 NULL,
3734 };
3735
3736 static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable)
3737 {
3738 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3739
3740 mutex_lock(&sport->mutex);
3741 srpt_set_enabled(sport, enable);
3742 mutex_unlock(&sport->mutex);
3743
3744 return 0;
3745 }
3746
3747
3748
3749
3750
3751
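/*
 * configfs callback invoked when a target portal group (TPG) directory is
 * created under a srpt WWN.
 */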
3752 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3753 const char *name)
3754 {
3755 struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
3756 struct srpt_tpg *stpg;
3757 int res = -ENOMEM;
3758
3759 stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
3760 if (!stpg)
3761 return ERR_PTR(res);
3762 stpg->sport_id = sport_id;
3763 res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
3764 if (res) {
3765 kfree(stpg);
3766 return ERR_PTR(res);
3767 }
3768
3769 mutex_lock(&sport_id->mutex);
3770 list_add_tail(&stpg->entry, &sport_id->tpg_list);
3771 mutex_unlock(&sport_id->mutex);
3772
3773 return &stpg->tpg;
3774 }
3775
3776
3777
3778
3779
3780 static void srpt_drop_tpg(struct se_portal_group *tpg)
3781 {
3782 struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
3783 struct srpt_port_id *sport_id = stpg->sport_id;
3784 struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3785
3786 mutex_lock(&sport_id->mutex);
3787 list_del(&stpg->entry);
3788 mutex_unlock(&sport_id->mutex);
3789
3790 sport->enabled = false;
3791 core_tpg_deregister(tpg);
3792 kfree(stpg);
3793 }
3794
3795
3796
3797
3798
3799
3800
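/*
 * configfs callback invoked when a WWN directory is created; the name
 * must match either the port GUID or the port GID of an RDMA port.
 */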
3801 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3802 struct config_group *group,
3803 const char *name)
3804 {
3805 struct port_and_port_id papi = srpt_lookup_port(name);
3806 struct srpt_port *sport = papi.sport;
3807 struct srpt_port_id *port_id;
3808
3809 if (!papi.port_id)
3810 return ERR_PTR(-EINVAL);
3811 if (*papi.port_id) {
3812
3813 WARN_ON_ONCE(true);
3814 return &(*papi.port_id)->wwn;
3815 }
3816 port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
3817 if (!port_id) {
3818 srpt_sdev_put(sport->sdev);
3819 return ERR_PTR(-ENOMEM);
3820 }
3821 mutex_init(&port_id->mutex);
3822 INIT_LIST_HEAD(&port_id->tpg_list);
3823 port_id->wwn.priv = sport;
3824 memcpy(port_id->name, papi.port_id == &sport->guid_id ? sport->guid_name :
3825 sport->gid_name, ARRAY_SIZE(port_id->name));
3826
3827 *papi.port_id = port_id;
3828
3829 return &port_id->wwn;
3830 }
3831
3832
3833
3834
3835
3836 static void srpt_drop_tport(struct se_wwn *wwn)
3837 {
3838 struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
3839 struct srpt_port *sport = wwn->priv;
3840
3841 if (sport->guid_id == port_id)
3842 sport->guid_id = NULL;
3843 else if (sport->gid_id == port_id)
3844 sport->gid_id = NULL;
3845 else
3846 WARN_ON_ONCE(true);
3847
3848 srpt_sdev_put(sport->sdev);
3849 kfree(port_id);
3850 }
3851
3852 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3853 {
3854 return sysfs_emit(buf, "\n");
3855 }
3856
3857 CONFIGFS_ATTR_RO(srpt_wwn_, version);
3858
3859 static struct configfs_attribute *srpt_wwn_attrs[] = {
3860 &srpt_wwn_attr_version,
3861 NULL,
3862 };
3863
3864 static const struct target_core_fabric_ops srpt_template = {
3865 .module = THIS_MODULE,
3866 .fabric_name = "srpt",
3867 .tpg_get_wwn = srpt_get_fabric_wwn,
3868 .tpg_get_tag = srpt_get_tag,
3869 .tpg_check_demo_mode = srpt_check_false,
3870 .tpg_check_demo_mode_cache = srpt_check_true,
3871 .tpg_check_demo_mode_write_protect = srpt_check_true,
3872 .tpg_check_prod_mode_write_protect = srpt_check_false,
3873 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3874 .release_cmd = srpt_release_cmd,
3875 .check_stop_free = srpt_check_stop_free,
3876 .close_session = srpt_close_session,
3877 .sess_get_index = srpt_sess_get_index,
3878 .sess_get_initiator_sid = NULL,
3879 .write_pending = srpt_write_pending,
3880 .set_default_node_attributes = srpt_set_default_node_attrs,
3881 .get_cmd_state = srpt_get_tcm_cmd_state,
3882 .queue_data_in = srpt_queue_data_in,
3883 .queue_status = srpt_queue_status,
3884 .queue_tm_rsp = srpt_queue_tm_rsp,
3885 .aborted_task = srpt_aborted_task,
3886
3887
3888
3889
3890 .fabric_make_wwn = srpt_make_tport,
3891 .fabric_drop_wwn = srpt_drop_tport,
3892 .fabric_make_tpg = srpt_make_tpg,
3893 .fabric_enable_tpg = srpt_enable_tpg,
3894 .fabric_drop_tpg = srpt_drop_tpg,
3895 .fabric_init_nodeacl = srpt_init_nodeacl,
3896
3897 .tfc_discovery_attrs = srpt_da_attrs,
3898 .tfc_wwn_attrs = srpt_wwn_attrs,
3899 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
3900 };
3901
3902
3903
3904
3905
3906
3907
3908
3909
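/*
 * Module initialization: validate the module parameters, register the
 * target core fabric template and register as an IB client.
 */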
3910 static int __init srpt_init_module(void)
3911 {
3912 int ret;
3913
3914 ret = -EINVAL;
3915 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3916 pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
3917 srp_max_req_size, MIN_MAX_REQ_SIZE);
3918 goto out;
3919 }
3920
3921 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3922 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3923 pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
3924 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3925 goto out;
3926 }
3927
3928 ret = target_register_template(&srpt_template);
3929 if (ret)
3930 goto out;
3931
3932 ret = ib_register_client(&srpt_client);
3933 if (ret) {
3934 pr_err("couldn't register IB client\n");
3935 goto out_unregister_target;
3936 }
3937
3938 return 0;
3939
3940 out_unregister_target:
3941 target_unregister_template(&srpt_template);
3942 out:
3943 return ret;
3944 }
3945
3946 static void __exit srpt_cleanup_module(void)
3947 {
3948 if (rdma_cm_id)
3949 rdma_destroy_id(rdma_cm_id);
3950 ib_unregister_client(&srpt_client);
3951 target_unregister_template(&srpt_template);
3952 }
3953
3954 module_init(srpt_init_module);
3955 module_exit(srpt_cleanup_module);