0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/kernel.h>
0034 #include <linux/slab.h>
0035 #include <linux/mm.h>
0036 #include <linux/highmem.h>
0037 #include <linux/scatterlist.h>
0038
0039 #include "iscsi_iser.h"
0040
/*
 * Completion handler for memory-registration work requests.
 * Registration WRs are not expected to generate completions in the
 * normal flow (they are unsignaled), so any completion that lands here
 * is an error and is only logged.
 */
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	iser_err_comp(wc, "memreg");
}
0045
0046 static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
0047 {
0048 struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
0049 struct iser_fr_desc *desc;
0050 unsigned long flags;
0051
0052 spin_lock_irqsave(&fr_pool->lock, flags);
0053 desc = list_first_entry(&fr_pool->list,
0054 struct iser_fr_desc, list);
0055 list_del(&desc->list);
0056 spin_unlock_irqrestore(&fr_pool->lock, flags);
0057
0058 return desc;
0059 }
0060
0061 static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
0062 struct iser_fr_desc *desc)
0063 {
0064 struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
0065 unsigned long flags;
0066
0067 spin_lock_irqsave(&fr_pool->lock, flags);
0068 list_add(&desc->list, &fr_pool->list);
0069 spin_unlock_irqrestore(&fr_pool->lock, flags);
0070 }
0071
/**
 * iser_dma_map_task_data - DMA-map a task's data (and protection) buffers
 * @iser_task: iser task descriptor
 * @iser_dir:  iSER transfer direction the buffers belong to
 * @dma_dir:   DMA API direction used for the mapping
 *
 * Maps the task's main scatterlist and, when the command carries T10-PI
 * protection information, the protection scatterlist as well.  If the
 * protection mapping fails the already-mapped data buffer is unmapped
 * so no mapping leaks.
 *
 * Returns 0 on success, -EINVAL if either mapping fails.
 */
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct iser_data_buf *data = &iser_task->data[iser_dir];
	struct ib_device *dev;

	/* record the direction so the unmap path knows what to undo */
	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (unlikely(data->dma_nents == 0)) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];

		pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
		if (unlikely(pdata->dma_nents == 0)) {
			iser_err("protection dma_map_sg failed!!!\n");
			goto out_unmap;
		}
	}

	return 0;

out_unmap:
	/* roll back the data mapping; task must end up fully unmapped */
	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
	return -EINVAL;
}
0104
0105
/**
 * iser_dma_unmap_task_data - undo iser_dma_map_task_data()
 * @iser_task: iser task descriptor
 * @iser_dir:  iSER transfer direction that was mapped
 * @dma_dir:   DMA API direction the buffers were mapped with
 *
 * Unmaps the task's main scatterlist and, if the command carried T10-PI
 * protection information, the protection scatterlist too.  Must mirror
 * exactly what the map path mapped.
 */
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir iser_dir,
			      enum dma_data_direction dma_dir)
{
	struct iser_data_buf *data = &iser_task->data[iser_dir];
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];

		ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
	}
}
0122
/*
 * Register a single-entry DMA mapping without a memory registration WR,
 * using the PD's local DMA lkey directly.  Only valid when the data
 * buffer maps to exactly one DMA entry (caller checks dma_nents == 1).
 */
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
			struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	/*
	 * The remote key is only usable when the PD was created with the
	 * unsafe global rkey; otherwise advertise 0.
	 * NOTE(review): with rkey == 0 the peer cannot RDMA to this region —
	 * presumably callers only take this path when that is acceptable
	 * (iser_always_reg / all_imm logic in iser_reg_mem_fastreg); confirm.
	 */
	if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
		reg->rkey = device->pd->unsafe_global_rkey;
	else
		reg->rkey = 0;
	reg->sge.addr = sg_dma_address(&sg[0]);
	reg->sge.length = sg_dma_len(&sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}
0147
/**
 * iser_unreg_mem_fastreg - release a task's fast-registration descriptor
 * @iser_task: iser task descriptor
 * @cmd_dir:   transfer direction whose registration is being torn down
 *
 * Returns the descriptor attached to the task's rdma_reg back to the
 * connection's free pool.  No-op if no descriptor was attached (the
 * DMA-lkey fast path in iser_reg_dma() leaves desc == NULL).
 */
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc;
	struct ib_mr_status mr_status;

	desc = reg->desc;
	if (!desc)
		return;

	/*
	 * If the task was protected but completed without the transport
	 * checking protection (e.g. error/timeout paths), query the
	 * signature MR status before recycling the descriptor.  The
	 * result is deliberately discarded.
	 * NOTE(review): the query appears to be required to take the MR
	 * out of its signature-checked state so it can be reused —
	 * confirm against the ib_check_mr_status() documentation.
	 */
	if (unlikely(desc->sig_protected)) {
		desc->sig_protected = false;
		ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
				   &mr_status);
	}
	iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
	reg->desc = NULL;
}
0175
/*
 * Fill one T10-DIF signature domain (wire or memory side) from the
 * SCSI command's protection parameters.
 */
static void iser_set_dif_domain(struct scsi_cmnd *sc,
				struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
	/*
	 * At the moment we hard-code the app-tag handling: check the full
	 * app tag and honor the escape values.  In the future these could
	 * be taken from the SCSI command itself.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	/* remap the reference tag when it increments per block */
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}
0192
0193 static int iser_set_sig_attrs(struct scsi_cmnd *sc,
0194 struct ib_sig_attrs *sig_attrs)
0195 {
0196 switch (scsi_get_prot_op(sc)) {
0197 case SCSI_PROT_WRITE_INSERT:
0198 case SCSI_PROT_READ_STRIP:
0199 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
0200 iser_set_dif_domain(sc, &sig_attrs->wire);
0201 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
0202 break;
0203 case SCSI_PROT_READ_INSERT:
0204 case SCSI_PROT_WRITE_STRIP:
0205 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
0206 iser_set_dif_domain(sc, &sig_attrs->mem);
0207 sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
0208 IB_T10DIF_CSUM : IB_T10DIF_CRC;
0209 break;
0210 case SCSI_PROT_READ_PASS:
0211 case SCSI_PROT_WRITE_PASS:
0212 iser_set_dif_domain(sc, &sig_attrs->wire);
0213 sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
0214 iser_set_dif_domain(sc, &sig_attrs->mem);
0215 sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
0216 IB_T10DIF_CSUM : IB_T10DIF_CRC;
0217 break;
0218 default:
0219 iser_err("Unsupported PI operation %d\n",
0220 scsi_get_prot_op(sc));
0221 return -EINVAL;
0222 }
0223
0224 return 0;
0225 }
0226
0227 static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
0228 {
0229 *mask = 0;
0230 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
0231 *mask |= IB_SIG_CHECK_REFTAG;
0232 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
0233 *mask |= IB_SIG_CHECK_GUARD;
0234 }
0235
0236 static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
0237 struct ib_cqe *cqe, struct ib_send_wr *next_wr)
0238 {
0239 inv_wr->opcode = IB_WR_LOCAL_INV;
0240 inv_wr->wr_cqe = cqe;
0241 inv_wr->ex.invalidate_rkey = mr->rkey;
0242 inv_wr->send_flags = 0;
0243 inv_wr->num_sge = 0;
0244 inv_wr->next = next_wr;
0245 }
0246
/*
 * Register data + protection buffers through a signature-enabled MR
 * (T10-PI offload) and build the IB_WR_REG_MR_INTEGRITY work request
 * that will be posted ahead of the task's send WR.
 *
 * On success fills @sig_reg with the lkey/rkey/addr/length of the
 * registered region and returns 0; returns a negative errno otherwise.
 */
static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *mem,
			   struct iser_data_buf *sig_mem,
			   struct iser_reg_resources *rsc,
			   struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->sig_mr;
	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
	struct ib_reg_wr *wr = &tx_desc->reg_wr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	/* if the MR still holds a valid registration, invalidate it first
	 * by chaining a LOCAL_INV WR ahead of the registration WR */
	if (rsc->mr_valid)
		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

	/* bump the key portion of the rkey so stale remote accesses fail */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
			      sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
	if (unlikely(ret)) {
		iser_err("failed to map PI sg (%d)\n",
			 mem->dma_nents + sig_mem->dma_nents);
		goto err;
	}

	memset(wr, 0, sizeof(*wr));
	/* registration WR is chained in front of the task's send WR */
	wr->wr.next = &tx_desc->send_wr;
	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
	wr->wr.wr_cqe = cqe;
	wr->wr.num_sge = 0;
	wr->wr.send_flags = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE;
	/* mark valid so the next use of this MR invalidates it first */
	rsc->mr_valid = 1;

	sig_reg->sge.lkey = mr->lkey;
	sig_reg->rkey = mr->rkey;
	sig_reg->sge.addr = mr->iova;
	sig_reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	/* success also exits through here with ret == 0 */
	return ret;
}
0304
/*
 * Register the task's data buffer with a fast-registration MR and build
 * the IB_WR_REG_MR work request to be posted ahead of the send WR.
 *
 * On success fills @reg with the lkey/rkey/addr/length of the registered
 * region and returns 0; returns a negative errno on mapping failure.
 */
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr = &tx_desc->reg_wr;
	int n;

	/* if the MR still holds a valid registration, invalidate it first
	 * by chaining a LOCAL_INV WR ahead of the registration WR */
	if (rsc->mr_valid)
		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

	/* bump the key portion of the rkey so stale remote accesses fail */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
	if (unlikely(n != mem->dma_nents)) {
		/* partial mapping is as fatal as an outright failure */
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->dma_nents);
		return n < 0 ? n : -EINVAL;
	}

	/* registration WR is chained in front of the task's send WR */
	wr->wr.next = &tx_desc->send_wr;
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	/* mark valid so the next use of this MR invalidates it first */
	rsc->mr_valid = 1;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}
0351
/**
 * iser_reg_mem_fastreg - register a task's memory for RDMA
 * @task:    iser task descriptor
 * @dir:     transfer direction to register
 * @all_imm: true when the whole transfer fits in immediate data, so a
 *           remote key is not needed
 *
 * Chooses between the zero-cost DMA-lkey path (single DMA entry, no PI,
 * and registration not forced) and a fast-registration descriptor from
 * the connection pool — signature MR when the command carries T10-PI,
 * plain fast-reg MR otherwise.  The descriptor is attached to the
 * task's rdma_reg on success and returned to the pool on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
			 enum iser_data_dir dir,
			 bool all_imm)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_fr_desc *desc;
	bool use_dma_key;
	int err;

	use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	desc = iser_reg_desc_get_fr(ib_conn);
	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
		err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
		if (unlikely(err))
			goto err_reg;
	} else {
		err = iser_reg_sig_mr(task, mem, &task->prot[dir],
				      &desc->rsc, reg);
		if (unlikely(err))
			goto err_reg;

		/* teardown must check signature MR status before reuse */
		desc->sig_protected = true;
	}

	/* descriptor ownership moves to the task until unreg */
	reg->desc = desc;

	return 0;

err_reg:
	iser_reg_desc_put_fr(ib_conn, desc);

	return err;
}