#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

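/* Register the Data-IN buffer for RDMA and fill in the READ portion of
 * the iSER header. The transfer length was stored in
 * task->data[ISER_DIR_IN].data_len by iser_send_command().
 */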
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;

	err = iser_dma_map_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
	if (err)
		return err;

	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		goto out_err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;

out_err:
	iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
	return err;
}

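/* Register the Data-OUT buffer for RDMA and fill in the WRITE portion
 * of the iSER header. Immediate data, if any, is carried inline as a
 * second SGE of the SCSI command PDU.
 */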
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
				  unsigned int unsol_sz, unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
	if (err)
		return err;

	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
				   buf_out->data_len == imm_sz);
	if (err) {
		iser_err("Failed to register write cmd RDMA mem\n");
		goto out_err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags |= ISER_WSV;
		if (buf_out->data_len > imm_sz) {
			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
		}

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;

out_err:
	iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
	return err;
}

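/* Initialize a TX descriptor: sync its header buffer for CPU access and
 * reset the iSER header to a bare ISER_VER header with a single SGE.
 */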
static void iser_create_send_desc(struct iser_conn *iser_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
				   tx_desc->dma_addr, ISER_HEADERS_LEN,
				   DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
}

static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	desc->req = NULL;
	desc->rsp = NULL;
}

static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					  ISER_RX_LOGIN_SIZE,
					  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}

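/* Allocate the connection's receive resources: the fastreg descriptor
 * pool, the login request/response buffers, and one DMA-mapped RX
 * descriptor per command slot of the session.
 */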
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;

	if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
				    iser_conn->pages_per_mr))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
					    sizeof(struct iser_rx_desc),
					    GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE,
					     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_free_fastreg_pool(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}

static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_session *session = conn->session;
	int err = 0;
	int i;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);

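	/* check if this is the last login - going to full feature phase */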
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		goto out;

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		goto out;
	}

	iser_info("Normal session, posting batch of RX %d buffers\n",
		  iser_conn->qp_max_recv_dtos - 1);

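	/*
	 * rx_descs[0] is deliberately skipped here: it is posted by the
	 * login response handler (iser_login_rsp) once the connection
	 * reaches full feature phase.
	 */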
	for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
		err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
		if (err)
			goto out;
	}
out:
	return err;
}

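/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */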
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

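	/* build the tx desc regd header and add it to the tx desc dto */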
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) {
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

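/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */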
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (!tx_desc)
		return -ENOMEM;

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

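	/* build the tx desc */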
	err = iser_initialize_task_headers(task, tx_desc);
	if (err)
		goto send_data_out_error;

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

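	/* build the tx desc regd header and add it to the tx desc dto */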
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;
	bool full_feature_phase;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;
	full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
			      ISCSI_FULL_FEATURE_PHASE) &&
			     (hdr->flags & ISCSI_FLAG_CMD_FINAL);

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	if (!full_feature_phase ||
	    iser_conn->iscsi_conn->session->discovery_sess)
		return;

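	/* Post the first RX buffer that was skipped in iser_post_rx_bufs() */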
	iser_post_recvm(iser_conn, iser_conn->rx_descs);
}

static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
		iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
		return -EINVAL;
	}

	desc->rsc.mr_valid = 0;

	return 0;
}

static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
				 struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
				 iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}

void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	err = iser_post_recvm(iser_conn, desc);
	if (err)
		iser_err("posting rx buffer err %d\n", err);
}

void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}

void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

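	/* this arithmetic is legal by libiscsi dd_data allocation */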
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}

void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].dma_nents = 0;
	iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}

void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
					 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
					 DMA_TO_DEVICE);
	}
}