0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * NVMe over Fabrics RDMA target.
0004  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
0005  */
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007 #include <linux/atomic.h>
0008 #include <linux/blk-integrity.h>
0009 #include <linux/ctype.h>
0010 #include <linux/delay.h>
0011 #include <linux/err.h>
0012 #include <linux/init.h>
0013 #include <linux/module.h>
0014 #include <linux/nvme.h>
0015 #include <linux/slab.h>
0016 #include <linux/string.h>
0017 #include <linux/wait.h>
0018 #include <linux/inet.h>
0019 #include <asm/unaligned.h>
0020 
0021 #include <rdma/ib_verbs.h>
0022 #include <rdma/rdma_cm.h>
0023 #include <rdma/rw.h>
0024 #include <rdma/ib_cm.h>
0025 
0026 #include <linux/nvme-rdma.h>
0027 #include "nvmet.h"
0028 
0029 /*
0030  * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
0031  */
0032 #define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE
0033 #define NVMET_RDMA_MAX_INLINE_SGE       4
0034 #define NVMET_RDMA_MAX_INLINE_DATA_SIZE     max_t(int, SZ_16K, PAGE_SIZE)
0035 
0036 /* Assume mpsmin == device_page_size == 4KB */
0037 #define NVMET_RDMA_MAX_MDTS         8
0038 #define NVMET_RDMA_MAX_METADATA_MDTS        5
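/*
 * MDTS is a power-of-two value in units of mpsmin, so with the 4KB
 * assumption above the limits translate to 1MB data transfers
 * (2^8 * 4KB) and 128KB transfers (2^5 * 4KB) when T10-PI metadata
 * is in use.
 */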
0039 
0040 struct nvmet_rdma_srq;
0041 
0042 struct nvmet_rdma_cmd {
0043     struct ib_sge       sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
0044     struct ib_cqe       cqe;
0045     struct ib_recv_wr   wr;
0046     struct scatterlist  inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
0047     struct nvme_command     *nvme_cmd;
0048     struct nvmet_rdma_queue *queue;
0049     struct nvmet_rdma_srq   *nsrq;
0050 };
0051 
0052 enum {
0053     NVMET_RDMA_REQ_INLINE_DATA  = (1 << 0),
0054     NVMET_RDMA_REQ_INVALIDATE_RKEY  = (1 << 1),
0055 };
0056 
0057 struct nvmet_rdma_rsp {
0058     struct ib_sge       send_sge;
0059     struct ib_cqe       send_cqe;
0060     struct ib_send_wr   send_wr;
0061 
0062     struct nvmet_rdma_cmd   *cmd;
0063     struct nvmet_rdma_queue *queue;
0064 
0065     struct ib_cqe       read_cqe;
0066     struct ib_cqe       write_cqe;
0067     struct rdma_rw_ctx  rw;
0068 
0069     struct nvmet_req    req;
0070 
0071     bool            allocated;
0072     u8          n_rdma;
0073     u32         flags;
0074     u32         invalidate_rkey;
0075 
0076     struct list_head    wait_list;
0077     struct list_head    free_list;
0078 };
0079 
0080 enum nvmet_rdma_queue_state {
0081     NVMET_RDMA_Q_CONNECTING,
0082     NVMET_RDMA_Q_LIVE,
0083     NVMET_RDMA_Q_DISCONNECTING,
0084 };
0085 
0086 struct nvmet_rdma_queue {
0087     struct rdma_cm_id   *cm_id;
0088     struct ib_qp        *qp;
0089     struct nvmet_port   *port;
0090     struct ib_cq        *cq;
0091     atomic_t        sq_wr_avail;
0092     struct nvmet_rdma_device *dev;
0093     struct nvmet_rdma_srq   *nsrq;
0094     spinlock_t      state_lock;
0095     enum nvmet_rdma_queue_state state;
0096     struct nvmet_cq     nvme_cq;
0097     struct nvmet_sq     nvme_sq;
0098 
0099     struct nvmet_rdma_rsp   *rsps;
0100     struct list_head    free_rsps;
0101     spinlock_t      rsps_lock;
0102     struct nvmet_rdma_cmd   *cmds;
0103 
0104     struct work_struct  release_work;
0105     struct list_head    rsp_wait_list;
0106     struct list_head    rsp_wr_wait_list;
0107     spinlock_t      rsp_wr_wait_lock;
0108 
0109     int         idx;
0110     int         host_qid;
0111     int         comp_vector;
0112     int         recv_queue_size;
0113     int         send_queue_size;
0114 
0115     struct list_head    queue_list;
0116 };
0117 
0118 struct nvmet_rdma_port {
0119     struct nvmet_port   *nport;
0120     struct sockaddr_storage addr;
0121     struct rdma_cm_id   *cm_id;
0122     struct delayed_work repair_work;
0123 };
0124 
0125 struct nvmet_rdma_srq {
0126     struct ib_srq            *srq;
0127     struct nvmet_rdma_cmd    *cmds;
0128     struct nvmet_rdma_device *ndev;
0129 };
0130 
0131 struct nvmet_rdma_device {
0132     struct ib_device    *device;
0133     struct ib_pd        *pd;
0134     struct nvmet_rdma_srq   **srqs;
0135     int         srq_count;
0136     size_t          srq_size;
0137     struct kref     ref;
0138     struct list_head    entry;
0139     int         inline_data_size;
0140     int         inline_page_count;
0141 };
0142 
0143 static bool nvmet_rdma_use_srq;
0144 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
0145 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
0146 
0147 static int srq_size_set(const char *val, const struct kernel_param *kp);
0148 static const struct kernel_param_ops srq_size_ops = {
0149     .set = srq_size_set,
0150     .get = param_get_int,
0151 };
0152 
0153 static int nvmet_rdma_srq_size = 1024;
0154 module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
0155 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");
0156 
0157 static DEFINE_IDA(nvmet_rdma_queue_ida);
0158 static LIST_HEAD(nvmet_rdma_queue_list);
0159 static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
0160 
0161 static LIST_HEAD(device_list);
0162 static DEFINE_MUTEX(device_list_mutex);
0163 
0164 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
0165 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
0166 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
0167 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
0168 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
0169 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
0170 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
0171 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
0172                 struct nvmet_rdma_rsp *r);
0173 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
0174                 struct nvmet_rdma_rsp *r);
0175 
0176 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
0177 
0178 static int srq_size_set(const char *val, const struct kernel_param *kp)
0179 {
0180     int n = 0, ret;
0181 
0182     ret = kstrtoint(val, 10, &n);
0183     if (ret != 0 || n < 256)
0184         return -EINVAL;
0185 
0186     return param_set_int(val, kp);
0187 }
0188 
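/*
 * Number of pages needed to hold @len bytes, i.e. DIV_ROUND_UP(len,
 * PAGE_SIZE) for len > 0.  Used to size the per-command inline data
 * SGEs and page allocations.
 */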
0189 static int num_pages(int len)
0190 {
0191     return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
0192 }
0193 
0194 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
0195 {
0196     return nvme_is_write(rsp->req.cmd) &&
0197         rsp->req.transfer_len &&
0198         !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
0199 }
0200 
0201 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
0202 {
0203     return !nvme_is_write(rsp->req.cmd) &&
0204         rsp->req.transfer_len &&
0205         !rsp->req.cqe->status &&
0206         !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
0207 }
0208 
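/*
 * Responses are normally taken from the queue's pre-allocated free list.
 * If the list is empty (e.g. the host sent more commands than the queue
 * was sized for, which can happen with a shared receive queue), fall
 * back to allocating one on demand; ->allocated marks it so that
 * nvmet_rdma_put_rsp() frees it instead of returning it to the list.
 */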
0209 static inline struct nvmet_rdma_rsp *
0210 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
0211 {
0212     struct nvmet_rdma_rsp *rsp;
0213     unsigned long flags;
0214 
0215     spin_lock_irqsave(&queue->rsps_lock, flags);
0216     rsp = list_first_entry_or_null(&queue->free_rsps,
0217                 struct nvmet_rdma_rsp, free_list);
0218     if (likely(rsp))
0219         list_del(&rsp->free_list);
0220     spin_unlock_irqrestore(&queue->rsps_lock, flags);
0221 
0222     if (unlikely(!rsp)) {
0223         int ret;
0224 
0225         rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
0226         if (unlikely(!rsp))
0227             return NULL;
0228         ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
0229         if (unlikely(ret)) {
0230             kfree(rsp);
0231             return NULL;
0232         }
0233 
0234         rsp->allocated = true;
0235     }
0236 
0237     return rsp;
0238 }
0239 
0240 static inline void
0241 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
0242 {
0243     unsigned long flags;
0244 
0245     if (unlikely(rsp->allocated)) {
0246         nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
0247         kfree(rsp);
0248         return;
0249     }
0250 
0251     spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
0252     list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
0253     spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
0254 }
0255 
0256 static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
0257                 struct nvmet_rdma_cmd *c)
0258 {
0259     struct scatterlist *sg;
0260     struct ib_sge *sge;
0261     int i;
0262 
0263     if (!ndev->inline_data_size)
0264         return;
0265 
0266     sg = c->inline_sg;
0267     sge = &c->sge[1];
0268 
0269     for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
0270         if (sge->length)
0271             ib_dma_unmap_page(ndev->device, sge->addr,
0272                     sge->length, DMA_FROM_DEVICE);
0273         if (sg_page(sg))
0274             __free_page(sg_page(sg));
0275     }
0276 }
0277 
0278 static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
0279                 struct nvmet_rdma_cmd *c)
0280 {
0281     struct scatterlist *sg;
0282     struct ib_sge *sge;
0283     struct page *pg;
0284     int len;
0285     int i;
0286 
0287     if (!ndev->inline_data_size)
0288         return 0;
0289 
0290     sg = c->inline_sg;
0291     sg_init_table(sg, ndev->inline_page_count);
0292     sge = &c->sge[1];
0293     len = ndev->inline_data_size;
0294 
0295     for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
0296         pg = alloc_page(GFP_KERNEL);
0297         if (!pg)
0298             goto out_err;
0299         sg_assign_page(sg, pg);
0300         sge->addr = ib_dma_map_page(ndev->device,
0301             pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
0302         if (ib_dma_mapping_error(ndev->device, sge->addr))
0303             goto out_err;
0304         sge->length = min_t(int, len, PAGE_SIZE);
0305         sge->lkey = ndev->pd->local_dma_lkey;
0306         len -= sge->length;
0307     }
0308 
0309     return 0;
0310 out_err:
0311     for (; i >= 0; i--, sg--, sge--) {
0312         if (sge->length)
0313             ib_dma_unmap_page(ndev->device, sge->addr,
0314                     sge->length, DMA_FROM_DEVICE);
0315         if (sg_page(sg))
0316             __free_page(sg_page(sg));
0317     }
0318     return -ENOMEM;
0319 }
0320 
0321 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
0322             struct nvmet_rdma_cmd *c, bool admin)
0323 {
0324     /* NVMe command / RDMA RECV */
0325     c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
0326     if (!c->nvme_cmd)
0327         goto out;
0328 
0329     c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
0330             sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
0331     if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
0332         goto out_free_cmd;
0333 
0334     c->sge[0].length = sizeof(*c->nvme_cmd);
0335     c->sge[0].lkey = ndev->pd->local_dma_lkey;
0336 
0337     if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
0338         goto out_unmap_cmd;
0339 
0340     c->cqe.done = nvmet_rdma_recv_done;
0341 
0342     c->wr.wr_cqe = &c->cqe;
0343     c->wr.sg_list = c->sge;
0344     c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;
0345 
0346     return 0;
0347 
0348 out_unmap_cmd:
0349     ib_dma_unmap_single(ndev->device, c->sge[0].addr,
0350             sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
0351 out_free_cmd:
0352     kfree(c->nvme_cmd);
0353 
0354 out:
0355     return -ENOMEM;
0356 }
0357 
0358 static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
0359         struct nvmet_rdma_cmd *c, bool admin)
0360 {
0361     if (!admin)
0362         nvmet_rdma_free_inline_pages(ndev, c);
0363     ib_dma_unmap_single(ndev->device, c->sge[0].addr,
0364                 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
0365     kfree(c->nvme_cmd);
0366 }
0367 
0368 static struct nvmet_rdma_cmd *
0369 nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
0370         int nr_cmds, bool admin)
0371 {
0372     struct nvmet_rdma_cmd *cmds;
0373     int ret = -EINVAL, i;
0374 
0375     cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
0376     if (!cmds)
0377         goto out;
0378 
0379     for (i = 0; i < nr_cmds; i++) {
0380         ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
0381         if (ret)
0382             goto out_free;
0383     }
0384 
0385     return cmds;
0386 
0387 out_free:
0388     while (--i >= 0)
0389         nvmet_rdma_free_cmd(ndev, cmds + i, admin);
0390     kfree(cmds);
0391 out:
0392     return ERR_PTR(ret);
0393 }
0394 
0395 static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
0396         struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
0397 {
0398     int i;
0399 
0400     for (i = 0; i < nr_cmds; i++)
0401         nvmet_rdma_free_cmd(ndev, cmds + i, admin);
0402     kfree(cmds);
0403 }
0404 
0405 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
0406         struct nvmet_rdma_rsp *r)
0407 {
0408     /* NVMe CQE / RDMA SEND */
0409     r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
0410     if (!r->req.cqe)
0411         goto out;
0412 
0413     r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
0414             sizeof(*r->req.cqe), DMA_TO_DEVICE);
0415     if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
0416         goto out_free_rsp;
0417 
0418     if (ib_dma_pci_p2p_dma_supported(ndev->device))
0419         r->req.p2p_client = &ndev->device->dev;
0420     r->send_sge.length = sizeof(*r->req.cqe);
0421     r->send_sge.lkey = ndev->pd->local_dma_lkey;
0422 
0423     r->send_cqe.done = nvmet_rdma_send_done;
0424 
0425     r->send_wr.wr_cqe = &r->send_cqe;
0426     r->send_wr.sg_list = &r->send_sge;
0427     r->send_wr.num_sge = 1;
0428     r->send_wr.send_flags = IB_SEND_SIGNALED;
0429 
0430     /* Data In / RDMA READ */
0431     r->read_cqe.done = nvmet_rdma_read_data_done;
0432     /* Data Out / RDMA WRITE */
0433     r->write_cqe.done = nvmet_rdma_write_data_done;
0434 
0435     return 0;
0436 
0437 out_free_rsp:
0438     kfree(r->req.cqe);
0439 out:
0440     return -ENOMEM;
0441 }
0442 
0443 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
0444         struct nvmet_rdma_rsp *r)
0445 {
0446     ib_dma_unmap_single(ndev->device, r->send_sge.addr,
0447                 sizeof(*r->req.cqe), DMA_TO_DEVICE);
0448     kfree(r->req.cqe);
0449 }
0450 
0451 static int
0452 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
0453 {
0454     struct nvmet_rdma_device *ndev = queue->dev;
0455     int nr_rsps = queue->recv_queue_size * 2;
0456     int ret = -EINVAL, i;
0457 
0458     queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
0459             GFP_KERNEL);
0460     if (!queue->rsps)
0461         goto out;
0462 
0463     for (i = 0; i < nr_rsps; i++) {
0464         struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
0465 
0466         ret = nvmet_rdma_alloc_rsp(ndev, rsp);
0467         if (ret)
0468             goto out_free;
0469 
0470         list_add_tail(&rsp->free_list, &queue->free_rsps);
0471     }
0472 
0473     return 0;
0474 
0475 out_free:
0476     while (--i >= 0) {
0477         struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
0478 
0479         list_del(&rsp->free_list);
0480         nvmet_rdma_free_rsp(ndev, rsp);
0481     }
0482     kfree(queue->rsps);
0483 out:
0484     return ret;
0485 }
0486 
0487 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
0488 {
0489     struct nvmet_rdma_device *ndev = queue->dev;
0490     int i, nr_rsps = queue->recv_queue_size * 2;
0491 
0492     for (i = 0; i < nr_rsps; i++) {
0493         struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
0494 
0495         list_del(&rsp->free_list);
0496         nvmet_rdma_free_rsp(ndev, rsp);
0497     }
0498     kfree(queue->rsps);
0499 }
0500 
0501 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
0502         struct nvmet_rdma_cmd *cmd)
0503 {
0504     int ret;
0505 
0506     ib_dma_sync_single_for_device(ndev->device,
0507         cmd->sge[0].addr, cmd->sge[0].length,
0508         DMA_FROM_DEVICE);
0509 
0510     if (cmd->nsrq)
0511         ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
0512     else
0513         ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
0514 
0515     if (unlikely(ret))
0516         pr_err("post_recv cmd failed\n");
0517 
0518     return ret;
0519 }
0520 
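/*
 * Retry commands that were deferred because the send queue had no work
 * request credits left.  Called whenever a response completes and
 * returns credits; stops at the first command that still cannot get
 * enough credits so that ordering is preserved.
 */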
0521 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
0522 {
0523     spin_lock(&queue->rsp_wr_wait_lock);
0524     while (!list_empty(&queue->rsp_wr_wait_list)) {
0525         struct nvmet_rdma_rsp *rsp;
0526         bool ret;
0527 
0528         rsp = list_entry(queue->rsp_wr_wait_list.next,
0529                 struct nvmet_rdma_rsp, wait_list);
0530         list_del(&rsp->wait_list);
0531 
0532         spin_unlock(&queue->rsp_wr_wait_lock);
0533         ret = nvmet_rdma_execute_command(rsp);
0534         spin_lock(&queue->rsp_wr_wait_lock);
0535 
0536         if (!ret) {
0537             list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
0538             break;
0539         }
0540     }
0541     spin_unlock(&queue->rsp_wr_wait_lock);
0542 }
0543 
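/*
 * After an RDMA transfer through a signature MR, query the MR status
 * and translate any T10-PI guard/reference/application tag mismatch
 * into the corresponding NVMe protection information status code.
 */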
0544 static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
0545 {
0546     struct ib_mr_status mr_status;
0547     int ret;
0548     u16 status = 0;
0549 
0550     ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
0551     if (ret) {
0552         pr_err("ib_check_mr_status failed, ret %d\n", ret);
0553         return NVME_SC_INVALID_PI;
0554     }
0555 
0556     if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
0557         switch (mr_status.sig_err.err_type) {
0558         case IB_SIG_BAD_GUARD:
0559             status = NVME_SC_GUARD_CHECK;
0560             break;
0561         case IB_SIG_BAD_REFTAG:
0562             status = NVME_SC_REFTAG_CHECK;
0563             break;
0564         case IB_SIG_BAD_APPTAG:
0565             status = NVME_SC_APPTAG_CHECK;
0566             break;
0567         }
0568         pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
0569                mr_status.sig_err.err_type,
0570                mr_status.sig_err.expected,
0571                mr_status.sig_err.actual);
0572     }
0573 
0574     return status;
0575 }
0576 
0577 static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
0578         struct nvme_command *cmd, struct ib_sig_domain *domain,
0579         u16 control, u8 pi_type)
0580 {
0581     domain->sig_type = IB_SIG_TYPE_T10_DIF;
0582     domain->sig.dif.bg_type = IB_T10DIF_CRC;
0583     domain->sig.dif.pi_interval = 1 << bi->interval_exp;
0584     domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
0585     if (control & NVME_RW_PRINFO_PRCHK_REF)
0586         domain->sig.dif.ref_remap = true;
0587 
0588     domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
0589     domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
0590     domain->sig.dif.app_escape = true;
0591     if (pi_type == NVME_NS_DPS_PI_TYPE3)
0592         domain->sig.dif.ref_escape = true;
0593 }
0594 
0595 static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
0596                      struct ib_sig_attrs *sig_attrs)
0597 {
0598     struct nvme_command *cmd = req->cmd;
0599     u16 control = le16_to_cpu(cmd->rw.control);
0600     u8 pi_type = req->ns->pi_type;
0601     struct blk_integrity *bi;
0602 
0603     bi = bdev_get_integrity(req->ns->bdev);
0604 
0605     memset(sig_attrs, 0, sizeof(*sig_attrs));
0606 
0607     if (control & NVME_RW_PRINFO_PRACT) {
0608         /* for WRITE_INSERT/READ_STRIP no wire domain */
0609         sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
0610         nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
0611                       pi_type);
0612         /* Clear the PRACT bit since HCA will generate/verify the PI */
0613         control &= ~NVME_RW_PRINFO_PRACT;
0614         cmd->rw.control = cpu_to_le16(control);
0615         /* PI is added by the HW */
0616         req->transfer_len += req->metadata_len;
0617     } else {
0618         /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
0619         nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
0620                       pi_type);
0621         nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
0622                       pi_type);
0623     }
0624 
0625     if (control & NVME_RW_PRINFO_PRCHK_REF)
0626         sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
0627     if (control & NVME_RW_PRINFO_PRCHK_GUARD)
0628         sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
0629     if (control & NVME_RW_PRINFO_PRCHK_APP)
0630         sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
0631 }
0632 
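/*
 * Set up an rdma_rw context for the host memory described by the keyed
 * SGL.  With T10-PI metadata the signature variant registers the data
 * and metadata scatterlists under a signature MR; the non-negative
 * return value counts the RDMA READ/WRITE work requests needed and is
 * accumulated into rsp->n_rdma by the caller.
 */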
0633 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
0634                   struct ib_sig_attrs *sig_attrs)
0635 {
0636     struct rdma_cm_id *cm_id = rsp->queue->cm_id;
0637     struct nvmet_req *req = &rsp->req;
0638     int ret;
0639 
0640     if (req->metadata_len)
0641         ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
0642             cm_id->port_num, req->sg, req->sg_cnt,
0643             req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
0644             addr, key, nvmet_data_dir(req));
0645     else
0646         ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
0647                        req->sg, req->sg_cnt, 0, addr, key,
0648                        nvmet_data_dir(req));
0649 
0650     return ret;
0651 }
0652 
0653 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
0654 {
0655     struct rdma_cm_id *cm_id = rsp->queue->cm_id;
0656     struct nvmet_req *req = &rsp->req;
0657 
0658     if (req->metadata_len)
0659         rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
0660             cm_id->port_num, req->sg, req->sg_cnt,
0661             req->metadata_sg, req->metadata_sg_cnt,
0662             nvmet_data_dir(req));
0663     else
0664         rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
0665                     req->sg, req->sg_cnt, nvmet_data_dir(req));
0666 }
0667 
0668 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
0669 {
0670     struct nvmet_rdma_queue *queue = rsp->queue;
0671 
0672     atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
0673 
0674     if (rsp->n_rdma)
0675         nvmet_rdma_rw_ctx_destroy(rsp);
0676 
0677     if (rsp->req.sg != rsp->cmd->inline_sg)
0678         nvmet_req_free_sgls(&rsp->req);
0679 
0680     if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
0681         nvmet_rdma_process_wr_wait_list(queue);
0682 
0683     nvmet_rdma_put_rsp(rsp);
0684 }
0685 
0686 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
0687 {
0688     if (queue->nvme_sq.ctrl) {
0689         nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
0690     } else {
0691         /*
0692          * We didn't set up the controller yet (e.g. an admin
0693          * connect error), so just disconnect and clean up
0694          * the queue.
0695          */
0696         nvmet_rdma_queue_disconnect(queue);
0697     }
0698 }
0699 
0700 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
0701 {
0702     struct nvmet_rdma_rsp *rsp =
0703         container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
0704     struct nvmet_rdma_queue *queue = wc->qp->qp_context;
0705 
0706     nvmet_rdma_release_rsp(rsp);
0707 
0708     if (unlikely(wc->status != IB_WC_SUCCESS &&
0709              wc->status != IB_WC_WR_FLUSH_ERR)) {
0710         pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
0711             wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
0712         nvmet_rdma_error_comp(queue);
0713     }
0714 }
0715 
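/*
 * Queue the NVMe completion back to the host: re-post the RECV buffer
 * that carried the command, chain any RDMA WRITE (Data-Out) work
 * requests in front of the SEND carrying the CQE, and use SEND with
 * Invalidate when the host asked us to remotely invalidate its rkey.
 */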
0716 static void nvmet_rdma_queue_response(struct nvmet_req *req)
0717 {
0718     struct nvmet_rdma_rsp *rsp =
0719         container_of(req, struct nvmet_rdma_rsp, req);
0720     struct rdma_cm_id *cm_id = rsp->queue->cm_id;
0721     struct ib_send_wr *first_wr;
0722 
0723     if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
0724         rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
0725         rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
0726     } else {
0727         rsp->send_wr.opcode = IB_WR_SEND;
0728     }
0729 
0730     if (nvmet_rdma_need_data_out(rsp)) {
0731         if (rsp->req.metadata_len)
0732             first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
0733                     cm_id->port_num, &rsp->write_cqe, NULL);
0734         else
0735             first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
0736                     cm_id->port_num, NULL, &rsp->send_wr);
0737     } else {
0738         first_wr = &rsp->send_wr;
0739     }
0740 
0741     nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
0742 
0743     ib_dma_sync_single_for_device(rsp->queue->dev->device,
0744         rsp->send_sge.addr, rsp->send_sge.length,
0745         DMA_TO_DEVICE);
0746 
0747     if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
0748         pr_err("sending cmd response failed\n");
0749         nvmet_rdma_release_rsp(rsp);
0750     }
0751 }
0752 
0753 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
0754 {
0755     struct nvmet_rdma_rsp *rsp =
0756         container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
0757     struct nvmet_rdma_queue *queue = wc->qp->qp_context;
0758     u16 status = 0;
0759 
0760     WARN_ON(rsp->n_rdma <= 0);
0761     atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
0762     rsp->n_rdma = 0;
0763 
0764     if (unlikely(wc->status != IB_WC_SUCCESS)) {
0765         nvmet_rdma_rw_ctx_destroy(rsp);
0766         nvmet_req_uninit(&rsp->req);
0767         nvmet_rdma_release_rsp(rsp);
0768         if (wc->status != IB_WC_WR_FLUSH_ERR) {
0769             pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
0770                 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
0771             nvmet_rdma_error_comp(queue);
0772         }
0773         return;
0774     }
0775 
0776     if (rsp->req.metadata_len)
0777         status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
0778     nvmet_rdma_rw_ctx_destroy(rsp);
0779 
0780     if (unlikely(status))
0781         nvmet_req_complete(&rsp->req, status);
0782     else
0783         rsp->req.execute(&rsp->req);
0784 }
0785 
0786 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
0787 {
0788     struct nvmet_rdma_rsp *rsp =
0789         container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
0790     struct nvmet_rdma_queue *queue = wc->qp->qp_context;
0791     struct rdma_cm_id *cm_id = rsp->queue->cm_id;
0792     u16 status;
0793 
0794     if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
0795         return;
0796 
0797     WARN_ON(rsp->n_rdma <= 0);
0798     atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
0799     rsp->n_rdma = 0;
0800 
0801     if (unlikely(wc->status != IB_WC_SUCCESS)) {
0802         nvmet_rdma_rw_ctx_destroy(rsp);
0803         nvmet_req_uninit(&rsp->req);
0804         nvmet_rdma_release_rsp(rsp);
0805         if (wc->status != IB_WC_WR_FLUSH_ERR) {
0806             pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
0807                 ib_wc_status_msg(wc->status), wc->status);
0808             nvmet_rdma_error_comp(queue);
0809         }
0810         return;
0811     }
0812 
0813     /*
0814      * Upon RDMA completion check the signature status
0815      * - if succeeded send good NVMe response
0816      * - if failed send bad NVMe response with appropriate error
0817      */
0818     status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
0819     if (unlikely(status))
0820         rsp->req.cqe->status = cpu_to_le16(status << 1);
0821     nvmet_rdma_rw_ctx_destroy(rsp);
0822 
0823     if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
0824         pr_err("sending cmd response failed\n");
0825         nvmet_rdma_release_rsp(rsp);
0826     }
0827 }
0828 
0829 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
0830         u64 off)
0831 {
0832     int sg_count = num_pages(len);
0833     struct scatterlist *sg;
0834     int i;
0835 
0836     sg = rsp->cmd->inline_sg;
0837     for (i = 0; i < sg_count; i++, sg++) {
0838         if (i < sg_count - 1)
0839             sg_unmark_end(sg);
0840         else
0841             sg_mark_end(sg);
0842         sg->offset = off;
0843         sg->length = min_t(int, len, PAGE_SIZE - off);
0844         len -= sg->length;
0845         if (!i)
0846             off = 0;
0847     }
0848 
0849     rsp->req.sg = rsp->cmd->inline_sg;
0850     rsp->req.sg_cnt = sg_count;
0851 }
0852 
0853 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
0854 {
0855     struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
0856     u64 off = le64_to_cpu(sgl->addr);
0857     u32 len = le32_to_cpu(sgl->length);
0858 
0859     if (!nvme_is_write(rsp->req.cmd)) {
0860         rsp->req.error_loc =
0861             offsetof(struct nvme_common_command, opcode);
0862         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0863     }
0864 
0865     if (off + len > rsp->queue->dev->inline_data_size) {
0866         pr_err("invalid inline data offset!\n");
0867         return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
0868     }
0869 
0870     /* no data command? */
0871     if (!len)
0872         return 0;
0873 
0874     nvmet_rdma_use_inline_sg(rsp, len, off);
0875     rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
0876     rsp->req.transfer_len += len;
0877     return 0;
0878 }
0879 
0880 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
0881         struct nvme_keyed_sgl_desc *sgl, bool invalidate)
0882 {
0883     u64 addr = le64_to_cpu(sgl->addr);
0884     u32 key = get_unaligned_le32(sgl->key);
0885     struct ib_sig_attrs sig_attrs;
0886     int ret;
0887 
0888     rsp->req.transfer_len = get_unaligned_le24(sgl->length);
0889 
0890     /* no data command? */
0891     if (!rsp->req.transfer_len)
0892         return 0;
0893 
0894     if (rsp->req.metadata_len)
0895         nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
0896 
0897     ret = nvmet_req_alloc_sgls(&rsp->req);
0898     if (unlikely(ret < 0))
0899         goto error_out;
0900 
0901     ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
0902     if (unlikely(ret < 0))
0903         goto error_out;
0904     rsp->n_rdma += ret;
0905 
0906     if (invalidate) {
0907         rsp->invalidate_rkey = key;
0908         rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
0909     }
0910 
0911     return 0;
0912 
0913 error_out:
0914     rsp->req.transfer_len = 0;
0915     return NVME_SC_INTERNAL;
0916 }
0917 
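/*
 * Dispatch on the SGL descriptor type in the command capsule: offset
 * (inline) SGLs reference data carried in the capsule itself, while
 * keyed SGLs describe host memory that we access with RDMA READ/WRITE,
 * optionally invalidating the remote key on completion.
 */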
0918 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
0919 {
0920     struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
0921 
0922     switch (sgl->type >> 4) {
0923     case NVME_SGL_FMT_DATA_DESC:
0924         switch (sgl->type & 0xf) {
0925         case NVME_SGL_FMT_OFFSET:
0926             return nvmet_rdma_map_sgl_inline(rsp);
0927         default:
0928             pr_err("invalid SGL subtype: %#x\n", sgl->type);
0929             rsp->req.error_loc =
0930                 offsetof(struct nvme_common_command, dptr);
0931             return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0932         }
0933     case NVME_KEY_SGL_FMT_DATA_DESC:
0934         switch (sgl->type & 0xf) {
0935         case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
0936             return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
0937         case NVME_SGL_FMT_ADDRESS:
0938             return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
0939         default:
0940             pr_err("invalid SGL subtype: %#x\n", sgl->type);
0941             rsp->req.error_loc =
0942                 offsetof(struct nvme_common_command, dptr);
0943             return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0944         }
0945     default:
0946         pr_err("invalid SGL type: %#x\n", sgl->type);
0947         rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
0948         return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
0949     }
0950 }
0951 
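/*
 * Each command needs 1 + n_rdma send-queue work requests: one for the
 * response SEND and n_rdma for its RDMA READ/WRITE transfers.  Return
 * false if not enough credits are available; the caller then parks the
 * command on rsp_wr_wait_list until credits are returned.
 */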
0952 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
0953 {
0954     struct nvmet_rdma_queue *queue = rsp->queue;
0955 
0956     if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
0957             &queue->sq_wr_avail) < 0)) {
0958         pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
0959                 1 + rsp->n_rdma, queue->idx,
0960                 queue->nvme_sq.ctrl->cntlid);
0961         atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
0962         return false;
0963     }
0964 
0965     if (nvmet_rdma_need_data_in(rsp)) {
0966         if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
0967                 queue->cm_id->port_num, &rsp->read_cqe, NULL))
0968             nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
0969     } else {
0970         rsp->req.execute(&rsp->req);
0971     }
0972 
0973     return true;
0974 }
0975 
0976 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
0977         struct nvmet_rdma_rsp *cmd)
0978 {
0979     u16 status;
0980 
0981     ib_dma_sync_single_for_cpu(queue->dev->device,
0982         cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
0983         DMA_FROM_DEVICE);
0984     ib_dma_sync_single_for_cpu(queue->dev->device,
0985         cmd->send_sge.addr, cmd->send_sge.length,
0986         DMA_TO_DEVICE);
0987 
0988     if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
0989             &queue->nvme_sq, &nvmet_rdma_ops))
0990         return;
0991 
0992     status = nvmet_rdma_map_sgl(cmd);
0993     if (status)
0994         goto out_err;
0995 
0996     if (unlikely(!nvmet_rdma_execute_command(cmd))) {
0997         spin_lock(&queue->rsp_wr_wait_lock);
0998         list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
0999         spin_unlock(&queue->rsp_wr_wait_lock);
1000     }
1001 
1002     return;
1003 
1004 out_err:
1005     nvmet_req_complete(&cmd->req, status);
1006 }
1007 
1008 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1009 {
1010     struct nvmet_rdma_cmd *cmd =
1011         container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
1012     struct nvmet_rdma_queue *queue = wc->qp->qp_context;
1013     struct nvmet_rdma_rsp *rsp;
1014 
1015     if (unlikely(wc->status != IB_WC_SUCCESS)) {
1016         if (wc->status != IB_WC_WR_FLUSH_ERR) {
1017             pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
1018                 wc->wr_cqe, ib_wc_status_msg(wc->status),
1019                 wc->status);
1020             nvmet_rdma_error_comp(queue);
1021         }
1022         return;
1023     }
1024 
1025     if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
1026         pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
1027         nvmet_rdma_error_comp(queue);
1028         return;
1029     }
1030 
1031     cmd->queue = queue;
1032     rsp = nvmet_rdma_get_rsp(queue);
1033     if (unlikely(!rsp)) {
1034         /*
1035          * We get here only under memory pressure: silently drop
1036          * the command and let the host retry, since we can't
1037          * even allocate a response to fail it with.
1038          */
1039         nvmet_rdma_post_recv(queue->dev, cmd);
1040         return;
1041     }
1042     rsp->queue = queue;
1043     rsp->cmd = cmd;
1044     rsp->flags = 0;
1045     rsp->req.cmd = cmd->nvme_cmd;
1046     rsp->req.port = queue->port;
1047     rsp->n_rdma = 0;
1048 
1049     if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
1050         unsigned long flags;
1051 
1052         spin_lock_irqsave(&queue->state_lock, flags);
1053         if (queue->state == NVMET_RDMA_Q_CONNECTING)
1054             list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
1055         else
1056             nvmet_rdma_put_rsp(rsp);
1057         spin_unlock_irqrestore(&queue->state_lock, flags);
1058         return;
1059     }
1060 
1061     nvmet_rdma_handle_command(queue, rsp);
1062 }
1063 
1064 static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
1065 {
1066     nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
1067                  false);
1068     ib_destroy_srq(nsrq->srq);
1069 
1070     kfree(nsrq);
1071 }
1072 
1073 static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
1074 {
1075     int i;
1076 
1077     if (!ndev->srqs)
1078         return;
1079 
1080     for (i = 0; i < ndev->srq_count; i++)
1081         nvmet_rdma_destroy_srq(ndev->srqs[i]);
1082 
1083     kfree(ndev->srqs);
1084 }
1085 
1086 static struct nvmet_rdma_srq *
1087 nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
1088 {
1089     struct ib_srq_init_attr srq_attr = { NULL, };
1090     size_t srq_size = ndev->srq_size;
1091     struct nvmet_rdma_srq *nsrq;
1092     struct ib_srq *srq;
1093     int ret, i;
1094 
1095     nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
1096     if (!nsrq)
1097         return ERR_PTR(-ENOMEM);
1098 
1099     srq_attr.attr.max_wr = srq_size;
1100     srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
1101     srq_attr.attr.srq_limit = 0;
1102     srq_attr.srq_type = IB_SRQT_BASIC;
1103     srq = ib_create_srq(ndev->pd, &srq_attr);
1104     if (IS_ERR(srq)) {
1105         ret = PTR_ERR(srq);
1106         goto out_free;
1107     }
1108 
1109     nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
1110     if (IS_ERR(nsrq->cmds)) {
1111         ret = PTR_ERR(nsrq->cmds);
1112         goto out_destroy_srq;
1113     }
1114 
1115     nsrq->srq = srq;
1116     nsrq->ndev = ndev;
1117 
1118     for (i = 0; i < srq_size; i++) {
1119         nsrq->cmds[i].nsrq = nsrq;
1120         ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
1121         if (ret)
1122             goto out_free_cmds;
1123     }
1124 
1125     return nsrq;
1126 
1127 out_free_cmds:
1128     nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
1129 out_destroy_srq:
1130     ib_destroy_srq(srq);
1131 out_free:
1132     kfree(nsrq);
1133     return ERR_PTR(ret);
1134 }
1135 
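/*
 * When use_srq is enabled, create one SRQ per completion vector (capped
 * by the device's max_srq/max_srq_wr limits) and pre-post srq_size RECV
 * buffers on each.  Queues then share the SRQ that matches their
 * completion vector instead of posting per-queue receive buffers.
 */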
1136 static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
1137 {
1138     int i, ret;
1139 
1140     if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
1141         /*
1142          * If SRQs aren't supported we just go ahead and use normal
1143          * non-shared receive queues.
1144          */
1145         pr_info("SRQ requested but not supported.\n");
1146         return 0;
1147     }
1148 
1149     ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
1150                  nvmet_rdma_srq_size);
1151     ndev->srq_count = min(ndev->device->num_comp_vectors,
1152                   ndev->device->attrs.max_srq);
1153 
1154     ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
1155     if (!ndev->srqs)
1156         return -ENOMEM;
1157 
1158     for (i = 0; i < ndev->srq_count; i++) {
1159         ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
1160         if (IS_ERR(ndev->srqs[i])) {
1161             ret = PTR_ERR(ndev->srqs[i]);
1162             goto err_srq;
1163         }
1164     }
1165 
1166     return 0;
1167 
1168 err_srq:
1169     while (--i >= 0)
1170         nvmet_rdma_destroy_srq(ndev->srqs[i]);
1171     kfree(ndev->srqs);
1172     return ret;
1173 }
1174 
1175 static void nvmet_rdma_free_dev(struct kref *ref)
1176 {
1177     struct nvmet_rdma_device *ndev =
1178         container_of(ref, struct nvmet_rdma_device, ref);
1179 
1180     mutex_lock(&device_list_mutex);
1181     list_del(&ndev->entry);
1182     mutex_unlock(&device_list_mutex);
1183 
1184     nvmet_rdma_destroy_srqs(ndev);
1185     ib_dealloc_pd(ndev->pd);
1186 
1187     kfree(ndev);
1188 }
1189 
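/*
 * Look up (by node GUID) or create the per-IB-device state shared by
 * all queues on that device: the PD, the optional SRQs and the inline
 * data geometry.  The inline data size is clamped to what the device's
 * receive SGE limits can actually support, and T10-PI is disabled if
 * the device lacks integrity handover support.
 */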
1190 static struct nvmet_rdma_device *
1191 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
1192 {
1193     struct nvmet_rdma_port *port = cm_id->context;
1194     struct nvmet_port *nport = port->nport;
1195     struct nvmet_rdma_device *ndev;
1196     int inline_page_count;
1197     int inline_sge_count;
1198     int ret;
1199 
1200     mutex_lock(&device_list_mutex);
1201     list_for_each_entry(ndev, &device_list, entry) {
1202         if (ndev->device->node_guid == cm_id->device->node_guid &&
1203             kref_get_unless_zero(&ndev->ref))
1204             goto out_unlock;
1205     }
1206 
1207     ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
1208     if (!ndev)
1209         goto out_err;
1210 
1211     inline_page_count = num_pages(nport->inline_data_size);
1212     inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
1213                 cm_id->device->attrs.max_recv_sge) - 1;
1214     if (inline_page_count > inline_sge_count) {
1215         pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
1216             nport->inline_data_size, cm_id->device->name,
1217             inline_sge_count * PAGE_SIZE);
1218         nport->inline_data_size = inline_sge_count * PAGE_SIZE;
1219         inline_page_count = inline_sge_count;
1220     }
1221     ndev->inline_data_size = nport->inline_data_size;
1222     ndev->inline_page_count = inline_page_count;
1223 
1224     if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
1225                   IBK_INTEGRITY_HANDOVER)) {
1226         pr_warn("T10-PI is not supported by device %s. Disabling it\n",
1227             cm_id->device->name);
1228         nport->pi_enable = false;
1229     }
1230 
1231     ndev->device = cm_id->device;
1232     kref_init(&ndev->ref);
1233 
1234     ndev->pd = ib_alloc_pd(ndev->device, 0);
1235     if (IS_ERR(ndev->pd))
1236         goto out_free_dev;
1237 
1238     if (nvmet_rdma_use_srq) {
1239         ret = nvmet_rdma_init_srqs(ndev);
1240         if (ret)
1241             goto out_free_pd;
1242     }
1243 
1244     list_add(&ndev->entry, &device_list);
1245 out_unlock:
1246     mutex_unlock(&device_list_mutex);
1247     pr_debug("added %s.\n", ndev->device->name);
1248     return ndev;
1249 
1250 out_free_pd:
1251     ib_dealloc_pd(ndev->pd);
1252 out_free_dev:
1253     kfree(ndev);
1254 out_err:
1255     mutex_unlock(&device_list_mutex);
1256     return NULL;
1257 }
1258 
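/*
 * Create the completion queue and RC queue pair for a queue.  The CQ is
 * sized for one RECV, one RDMA READ/WRITE and one SEND completion per
 * command (plus one slot for draining), and max_rdma_ctxs is scaled by
 * rdma_rw_mr_factor() so that MDTS-sized transfers can be mapped for
 * every outstanding command.
 */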
1259 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
1260 {
1261     struct ib_qp_init_attr qp_attr = { };
1262     struct nvmet_rdma_device *ndev = queue->dev;
1263     int nr_cqe, ret, i, factor;
1264 
1265     /*
1266      * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
1267      */
1268     nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
1269 
1270     queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
1271                    queue->comp_vector, IB_POLL_WORKQUEUE);
1272     if (IS_ERR(queue->cq)) {
1273         ret = PTR_ERR(queue->cq);
1274         pr_err("failed to create CQ cqe= %d ret= %d\n",
1275                nr_cqe + 1, ret);
1276         goto out;
1277     }
1278 
1279     qp_attr.qp_context = queue;
1280     qp_attr.event_handler = nvmet_rdma_qp_event;
1281     qp_attr.send_cq = queue->cq;
1282     qp_attr.recv_cq = queue->cq;
1283     qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
1284     qp_attr.qp_type = IB_QPT_RC;
1285     /* +1 for drain */
1286     qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
1287     factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1288                    1 << NVMET_RDMA_MAX_MDTS);
1289     qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
1290     qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
1291                     ndev->device->attrs.max_send_sge);
1292 
1293     if (queue->nsrq) {
1294         qp_attr.srq = queue->nsrq->srq;
1295     } else {
1296         /* +1 for drain */
1297         qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
1298         qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
1299     }
1300 
1301     if (queue->port->pi_enable && queue->host_qid)
1302         qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
1303 
1304     ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
1305     if (ret) {
1306         pr_err("failed to create_qp ret= %d\n", ret);
1307         goto err_destroy_cq;
1308     }
1309     queue->qp = queue->cm_id->qp;
1310 
1311     atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
1312 
1313     pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1314          __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
1315          qp_attr.cap.max_send_wr, queue->cm_id);
1316 
1317     if (!queue->nsrq) {
1318         for (i = 0; i < queue->recv_queue_size; i++) {
1319             queue->cmds[i].queue = queue;
1320             ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
1321             if (ret)
1322                 goto err_destroy_qp;
1323         }
1324     }
1325 
1326 out:
1327     return ret;
1328 
1329 err_destroy_qp:
1330     rdma_destroy_qp(queue->cm_id);
1331 err_destroy_cq:
1332     ib_cq_pool_put(queue->cq, nr_cqe + 1);
1333     goto out;
1334 }
1335 
1336 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
1337 {
1338     ib_drain_qp(queue->qp);
1339     if (queue->cm_id)
1340         rdma_destroy_id(queue->cm_id);
1341     ib_destroy_qp(queue->qp);
1342     ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
1343                queue->send_queue_size + 1);
1344 }
1345 
1346 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
1347 {
1348     pr_debug("freeing queue %d\n", queue->idx);
1349 
1350     nvmet_sq_destroy(&queue->nvme_sq);
1351 
1352     nvmet_rdma_destroy_queue_ib(queue);
1353     if (!queue->nsrq) {
1354         nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1355                 queue->recv_queue_size,
1356                 !queue->host_qid);
1357     }
1358     nvmet_rdma_free_rsps(queue);
1359     ida_free(&nvmet_rdma_queue_ida, queue->idx);
1360     kfree(queue);
1361 }
1362 
1363 static void nvmet_rdma_release_queue_work(struct work_struct *w)
1364 {
1365     struct nvmet_rdma_queue *queue =
1366         container_of(w, struct nvmet_rdma_queue, release_work);
1367     struct nvmet_rdma_device *dev = queue->dev;
1368 
1369     nvmet_rdma_free_queue(queue);
1370 
1371     kref_put(&dev->ref, nvmet_rdma_free_dev);
1372 }
1373 
1374 static int
1375 nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1376                 struct nvmet_rdma_queue *queue)
1377 {
1378     struct nvme_rdma_cm_req *req;
1379 
1380     req = (struct nvme_rdma_cm_req *)conn->private_data;
1381     if (!req || conn->private_data_len == 0)
1382         return NVME_RDMA_CM_INVALID_LEN;
1383 
1384     if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1385         return NVME_RDMA_CM_INVALID_RECFMT;
1386 
1387     queue->host_qid = le16_to_cpu(req->qid);
1388 
1389     /*
1390      * req->hsqsize is a 0-based value, so our recv queue size is
1391      * hsqsize + 1; req->hrqsize corresponds to our send queue size.
1392      */
1393     queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1394     queue->send_queue_size = le16_to_cpu(req->hrqsize);
1395 
1396     if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
1397         return NVME_RDMA_CM_INVALID_HSQSIZE;
1398 
1399     /* XXX: Should we enforce some kind of max for IO queues? */
1400 
1401     return 0;
1402 }
1403 
1404 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1405                 enum nvme_rdma_cm_status status)
1406 {
1407     struct nvme_rdma_cm_rej rej;
1408 
1409     pr_debug("rejecting connect request: status %d (%s)\n",
1410          status, nvme_rdma_cm_msg(status));
1411 
1412     rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1413     rej.sts = cpu_to_le16(status);
1414 
1415     return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
1416                IB_CM_REJ_CONSUMER_DEFINED);
1417 }
1418 
1419 static struct nvmet_rdma_queue *
1420 nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1421         struct rdma_cm_id *cm_id,
1422         struct rdma_cm_event *event)
1423 {
1424     struct nvmet_rdma_port *port = cm_id->context;
1425     struct nvmet_rdma_queue *queue;
1426     int ret;
1427 
1428     queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1429     if (!queue) {
1430         ret = NVME_RDMA_CM_NO_RSC;
1431         goto out_reject;
1432     }
1433 
1434     ret = nvmet_sq_init(&queue->nvme_sq);
1435     if (ret) {
1436         ret = NVME_RDMA_CM_NO_RSC;
1437         goto out_free_queue;
1438     }
1439 
1440     ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1441     if (ret)
1442         goto out_destroy_sq;
1443 
1444     /*
1445      * Schedules the actual release because calling rdma_destroy_id from
1446      * inside a CM callback would trigger a deadlock. (great API design..)
1447      */
1448     INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1449     queue->dev = ndev;
1450     queue->cm_id = cm_id;
1451     queue->port = port->nport;
1452 
1453     spin_lock_init(&queue->state_lock);
1454     queue->state = NVMET_RDMA_Q_CONNECTING;
1455     INIT_LIST_HEAD(&queue->rsp_wait_list);
1456     INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1457     spin_lock_init(&queue->rsp_wr_wait_lock);
1458     INIT_LIST_HEAD(&queue->free_rsps);
1459     spin_lock_init(&queue->rsps_lock);
1460     INIT_LIST_HEAD(&queue->queue_list);
1461 
1462     queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
1463     if (queue->idx < 0) {
1464         ret = NVME_RDMA_CM_NO_RSC;
1465         goto out_destroy_sq;
1466     }
1467 
1468     /*
1469      * Spread the io queues across completion vectors,
1470      * but still keep all admin queues on vector 0.
1471      */
1472     queue->comp_vector = !queue->host_qid ? 0 :
1473         queue->idx % ndev->device->num_comp_vectors;
1474 
1475 
1476     ret = nvmet_rdma_alloc_rsps(queue);
1477     if (ret) {
1478         ret = NVME_RDMA_CM_NO_RSC;
1479         goto out_ida_remove;
1480     }
1481 
1482     if (ndev->srqs) {
1483         queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
1484     } else {
1485         queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1486                 queue->recv_queue_size,
1487                 !queue->host_qid);
1488         if (IS_ERR(queue->cmds)) {
1489             ret = NVME_RDMA_CM_NO_RSC;
1490             goto out_free_responses;
1491         }
1492     }
1493 
1494     ret = nvmet_rdma_create_queue_ib(queue);
1495     if (ret) {
1496         pr_err("%s: creating RDMA queue failed (%d).\n",
1497             __func__, ret);
1498         ret = NVME_RDMA_CM_NO_RSC;
1499         goto out_free_cmds;
1500     }
1501 
1502     return queue;
1503 
1504 out_free_cmds:
1505     if (!queue->nsrq) {
1506         nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1507                 queue->recv_queue_size,
1508                 !queue->host_qid);
1509     }
1510 out_free_responses:
1511     nvmet_rdma_free_rsps(queue);
1512 out_ida_remove:
1513     ida_free(&nvmet_rdma_queue_ida, queue->idx);
1514 out_destroy_sq:
1515     nvmet_sq_destroy(&queue->nvme_sq);
1516 out_free_queue:
1517     kfree(queue);
1518 out_reject:
1519     nvmet_rdma_cm_reject(cm_id, ret);
1520     return NULL;
1521 }
1522 
1523 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1524 {
1525     struct nvmet_rdma_queue *queue = priv;
1526 
1527     switch (event->event) {
1528     case IB_EVENT_COMM_EST:
1529         rdma_notify(queue->cm_id, event->event);
1530         break;
1531     case IB_EVENT_QP_LAST_WQE_REACHED:
1532         pr_debug("received last WQE reached event for queue=0x%p\n",
1533              queue);
1534         break;
1535     default:
1536         pr_err("received IB QP event: %s (%d)\n",
1537                ib_event_msg(event->event), event->event);
1538         break;
1539     }
1540 }
1541 
1542 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1543         struct nvmet_rdma_queue *queue,
1544         struct rdma_conn_param *p)
1545 {
1546     struct rdma_conn_param  param = { };
1547     struct nvme_rdma_cm_rep priv = { };
1548     int ret = -ENOMEM;
1549 
1550     param.rnr_retry_count = 7;
1551     param.flow_control = 1;
1552     param.initiator_depth = min_t(u8, p->initiator_depth,
1553         queue->dev->device->attrs.max_qp_init_rd_atom);
1554     param.private_data = &priv;
1555     param.private_data_len = sizeof(priv);
1556     priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1557     priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1558 
1559     ret = rdma_accept(cm_id, &param);
1560     if (ret)
1561         pr_err("rdma_accept failed (error code = %d)\n", ret);
1562 
1563     return ret;
1564 }
1565 
1566 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1567         struct rdma_cm_event *event)
1568 {
1569     struct nvmet_rdma_device *ndev;
1570     struct nvmet_rdma_queue *queue;
1571     int ret = -EINVAL;
1572 
1573     ndev = nvmet_rdma_find_get_device(cm_id);
1574     if (!ndev) {
1575         nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1576         return -ECONNREFUSED;
1577     }
1578 
1579     queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1580     if (!queue) {
1581         ret = -ENOMEM;
1582         goto put_device;
1583     }
1584 
1585     if (queue->host_qid == 0) {
1586         /* Let inflight controller teardown complete */
1587         flush_workqueue(nvmet_wq);
1588     }
1589 
1590     ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1591     if (ret) {
1592         /*
1593          * Don't destroy the cm_id in free path, as we implicitly
1594          * destroy the cm_id here with non-zero ret code.
1595          */
1596         queue->cm_id = NULL;
1597         goto free_queue;
1598     }
1599 
1600     mutex_lock(&nvmet_rdma_queue_mutex);
1601     list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1602     mutex_unlock(&nvmet_rdma_queue_mutex);
1603 
1604     return 0;
1605 
1606 free_queue:
1607     nvmet_rdma_free_queue(queue);
1608 put_device:
1609     kref_put(&ndev->ref, nvmet_rdma_free_dev);
1610 
1611     return ret;
1612 }
1613 
1614 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1615 {
1616     unsigned long flags;
1617 
1618     spin_lock_irqsave(&queue->state_lock, flags);
1619     if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1620         pr_warn("trying to establish a connected queue\n");
1621         goto out_unlock;
1622     }
1623     queue->state = NVMET_RDMA_Q_LIVE;
1624 
1625     while (!list_empty(&queue->rsp_wait_list)) {
1626         struct nvmet_rdma_rsp *cmd;
1627 
1628         cmd = list_first_entry(&queue->rsp_wait_list,
1629                     struct nvmet_rdma_rsp, wait_list);
1630         list_del(&cmd->wait_list);
1631 
1632         spin_unlock_irqrestore(&queue->state_lock, flags);
1633         nvmet_rdma_handle_command(queue, cmd);
1634         spin_lock_irqsave(&queue->state_lock, flags);
1635     }
1636 
1637 out_unlock:
1638     spin_unlock_irqrestore(&queue->state_lock, flags);
1639 }
1640 
1641 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1642 {
1643     bool disconnect = false;
1644     unsigned long flags;
1645 
1646     pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1647 
1648     spin_lock_irqsave(&queue->state_lock, flags);
1649     switch (queue->state) {
1650     case NVMET_RDMA_Q_CONNECTING:
1651         while (!list_empty(&queue->rsp_wait_list)) {
1652             struct nvmet_rdma_rsp *rsp;
1653 
1654             rsp = list_first_entry(&queue->rsp_wait_list,
1655                            struct nvmet_rdma_rsp,
1656                            wait_list);
1657             list_del(&rsp->wait_list);
1658             nvmet_rdma_put_rsp(rsp);
1659         }
1660         fallthrough;
1661     case NVMET_RDMA_Q_LIVE:
1662         queue->state = NVMET_RDMA_Q_DISCONNECTING;
1663         disconnect = true;
1664         break;
1665     case NVMET_RDMA_Q_DISCONNECTING:
1666         break;
1667     }
1668     spin_unlock_irqrestore(&queue->state_lock, flags);
1669 
1670     if (disconnect) {
1671         rdma_disconnect(queue->cm_id);
1672         queue_work(nvmet_wq, &queue->release_work);
1673     }
1674 }
1675 
1676 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1677 {
1678     bool disconnect = false;
1679 
1680     mutex_lock(&nvmet_rdma_queue_mutex);
1681     if (!list_empty(&queue->queue_list)) {
1682         list_del_init(&queue->queue_list);
1683         disconnect = true;
1684     }
1685     mutex_unlock(&nvmet_rdma_queue_mutex);
1686 
1687     if (disconnect)
1688         __nvmet_rdma_queue_disconnect(queue);
1689 }
1690 
1691 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1692         struct nvmet_rdma_queue *queue)
1693 {
1694     WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1695 
1696     mutex_lock(&nvmet_rdma_queue_mutex);
1697     if (!list_empty(&queue->queue_list))
1698         list_del_init(&queue->queue_list);
1699     mutex_unlock(&nvmet_rdma_queue_mutex);
1700 
1701     pr_err("failed to connect queue %d\n", queue->idx);
1702     queue_work(nvmet_wq, &queue->release_work);
1703 }
1704 
1705 /**
1706  * nvmet_rdma_device_removal() - Handle RDMA device removal
1707  * @cm_id:  rdma_cm id, used for nvmet port
1708  * @queue:      nvmet rdma queue (cm id qp_context)
1709  *
1710  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1711  * to be unplugged. Note that this event can be generated on a
1712  * normal queue cm_id and/or a device-bound listener cm_id (in
1713  * which case queue will be NULL).
1714  *
1715  * We registered an ib_client to handle device removal for queues,
1716  * so we only need to handle the listening port cm_ids here: clear
1717  * the port's cm_id pointer to prevent a double cm_id destruction,
1718  * and return a non-zero rc so the RDMA-CM core destroys its own ID.
1719  */
1720 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1721         struct nvmet_rdma_queue *queue)
1722 {
1723     struct nvmet_rdma_port *port;
1724 
1725     if (queue) {
1726         /*
1727          * This is a queue cm_id. We have registered an
1728          * ib_client to handle queue removal, so don't
1729          * interfere here and just return.
1730          */
1731         return 0;
1732     }
1733 
1734     port = cm_id->context;
1735 
1736     /*
1737      * This is a listener cm_id. Make sure that a
1738      * future remove_port won't invoke a double
1739      * cm_id destroy. Use an atomic xchg() to make
1740      * sure we don't race with remove_port.
1741      */
1742     if (xchg(&port->cm_id, NULL) != cm_id)
1743         return 0;
1744 
1745     /*
1746      * We need to return 1 so that the RDMA-CM core will
1747      * destroy its own ID.
1748      */
1749     return 1;
1750 }
1751 
1752 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1753         struct rdma_cm_event *event)
1754 {
1755     struct nvmet_rdma_queue *queue = NULL;
1756     int ret = 0;
1757 
1758     if (cm_id->qp)
1759         queue = cm_id->qp->qp_context;
1760 
1761     pr_debug("%s (%d): status %d id %p\n",
1762         rdma_event_msg(event->event), event->event,
1763         event->status, cm_id);
1764 
1765     switch (event->event) {
1766     case RDMA_CM_EVENT_CONNECT_REQUEST:
1767         ret = nvmet_rdma_queue_connect(cm_id, event);
1768         break;
1769     case RDMA_CM_EVENT_ESTABLISHED:
1770         nvmet_rdma_queue_established(queue);
1771         break;
1772     case RDMA_CM_EVENT_ADDR_CHANGE:
1773         if (!queue) {
1774             struct nvmet_rdma_port *port = cm_id->context;
1775 
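            /*
             * The listener address changed; schedule the port
             * repair work to tear down and re-create the
             * listening cm_id.
             */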
1776             queue_delayed_work(nvmet_wq, &port->repair_work, 0);
1777             break;
1778         }
1779         fallthrough;
1780     case RDMA_CM_EVENT_DISCONNECTED:
1781     case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1782         nvmet_rdma_queue_disconnect(queue);
1783         break;
1784     case RDMA_CM_EVENT_DEVICE_REMOVAL:
1785         ret = nvmet_rdma_device_removal(cm_id, queue);
1786         break;
1787     case RDMA_CM_EVENT_REJECTED:
1788         pr_debug("Connection rejected: %s\n",
1789              rdma_reject_msg(cm_id, event->status));
1790         fallthrough;
1791     case RDMA_CM_EVENT_UNREACHABLE:
1792     case RDMA_CM_EVENT_CONNECT_ERROR:
1793         nvmet_rdma_queue_connect_fail(cm_id, queue);
1794         break;
1795     default:
1796         pr_err("received unrecognized RDMA CM event %d\n",
1797             event->event);
1798         break;
1799     }
1800 
1801     return ret;
1802 }
1803 
1804 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1805 {
1806     struct nvmet_rdma_queue *queue;
1807 
1808 restart:
1809     mutex_lock(&nvmet_rdma_queue_mutex);
1810     list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1811         if (queue->nvme_sq.ctrl == ctrl) {
1812             list_del_init(&queue->queue_list);
1813             mutex_unlock(&nvmet_rdma_queue_mutex);
1814 
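            /*
             * The list mutex was dropped above; disconnect the queue
             * and restart the scan from the head of the list.
             */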
1815             __nvmet_rdma_queue_disconnect(queue);
1816             goto restart;
1817         }
1818     }
1819     mutex_unlock(&nvmet_rdma_queue_mutex);
1820 }
1821 
1822 static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
1823 {
1824     struct nvmet_rdma_queue *queue, *tmp;
1825     struct nvmet_port *nport = port->nport;
1826 
1827     mutex_lock(&nvmet_rdma_queue_mutex);
1828     list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
1829                  queue_list) {
1830         if (queue->port != nport)
1831             continue;
1832 
1833         list_del_init(&queue->queue_list);
1834         __nvmet_rdma_queue_disconnect(queue);
1835     }
1836     mutex_unlock(&nvmet_rdma_queue_mutex);
1837 }
1838 
1839 static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
1840 {
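    /*
     * Clearing port->cm_id with xchg() pairs with the xchg() in
     * nvmet_rdma_device_removal(), so only one of the two paths
     * ends up destroying the listener cm_id.
     */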
1841     struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
1842 
1843     if (cm_id)
1844         rdma_destroy_id(cm_id);
1845 
1846     /*
1847      * Destroy the remaining queues, which do not belong to any
1848      * controller yet. Doing this after the RDMA-CM ID has been
1849      * destroyed guarantees that no new queue will be created.
1850      */
1851     nvmet_rdma_destroy_port_queues(port);
1852 }
1853 
1854 static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
1855 {
1856     struct sockaddr *addr = (struct sockaddr *)&port->addr;
1857     struct rdma_cm_id *cm_id;
1858     int ret;
1859 
1860     cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1861             RDMA_PS_TCP, IB_QPT_RC);
1862     if (IS_ERR(cm_id)) {
1863         pr_err("CM ID creation failed\n");
1864         return PTR_ERR(cm_id);
1865     }
1866 
1867     /*
1868      * Allow both IPv4 and IPv6 sockets to bind a single port
1869      * at the same time.
1870      */
1871     ret = rdma_set_afonly(cm_id, 1);
1872     if (ret) {
1873         pr_err("rdma_set_afonly failed (%d)\n", ret);
1874         goto out_destroy_id;
1875     }
1876 
1877     ret = rdma_bind_addr(cm_id, addr);
1878     if (ret) {
1879         pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
1880         goto out_destroy_id;
1881     }
1882 
1883     ret = rdma_listen(cm_id, 128);
1884     if (ret) {
1885         pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
1886         goto out_destroy_id;
1887     }
1888 
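    /*
     * Publish the cm_id only after rdma_listen() succeeded, so
     * nvmet_rdma_disable_port() either sees NULL or a listener
     * that is fully set up.
     */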
1889     port->cm_id = cm_id;
1890     return 0;
1891 
1892 out_destroy_id:
1893     rdma_destroy_id(cm_id);
1894     return ret;
1895 }
1896 
1897 static void nvmet_rdma_repair_port_work(struct work_struct *w)
1898 {
1899     struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
1900             struct nvmet_rdma_port, repair_work);
1901     int ret;
1902 
1903     nvmet_rdma_disable_port(port);
1904     ret = nvmet_rdma_enable_port(port);
1905     if (ret)
1906         queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
1907 }
1908 
1909 static int nvmet_rdma_add_port(struct nvmet_port *nport)
1910 {
1911     struct nvmet_rdma_port *port;
1912     __kernel_sa_family_t af;
1913     int ret;
1914 
1915     port = kzalloc(sizeof(*port), GFP_KERNEL);
1916     if (!port)
1917         return -ENOMEM;
1918 
1919     nport->priv = port;
1920     port->nport = nport;
1921     INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
1922 
1923     switch (nport->disc_addr.adrfam) {
1924     case NVMF_ADDR_FAMILY_IP4:
1925         af = AF_INET;
1926         break;
1927     case NVMF_ADDR_FAMILY_IP6:
1928         af = AF_INET6;
1929         break;
1930     default:
1931         pr_err("address family %d not supported\n",
1932             nport->disc_addr.adrfam);
1933         ret = -EINVAL;
1934         goto out_free_port;
1935     }
1936 
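    /*
     * A negative inline_data_size selects the transport default;
     * values above the supported maximum are clamped.
     */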
1937     if (nport->inline_data_size < 0) {
1938         nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1939     } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1940         pr_warn("inline_data_size %u is too large, reducing to %u\n",
1941             nport->inline_data_size,
1942             NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1943         nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1944     }
1945 
1946     ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1947             nport->disc_addr.trsvcid, &port->addr);
1948     if (ret) {
1949         pr_err("malformed ip/port passed: %s:%s\n",
1950             nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1951         goto out_free_port;
1952     }
1953 
1954     ret = nvmet_rdma_enable_port(port);
1955     if (ret)
1956         goto out_free_port;
1957 
1958     pr_info("enabling port %d (%pISpcs)\n",
1959         le16_to_cpu(nport->disc_addr.portid),
1960         (struct sockaddr *)&port->addr);
1961 
1962     return 0;
1963 
1964 out_free_port:
1965     kfree(port);
1966     return ret;
1967 }
1968 
1969 static void nvmet_rdma_remove_port(struct nvmet_port *nport)
1970 {
1971     struct nvmet_rdma_port *port = nport->priv;
1972 
1973     cancel_delayed_work_sync(&port->repair_work);
1974     nvmet_rdma_disable_port(port);
1975     kfree(port);
1976 }
1977 
1978 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1979         struct nvmet_port *nport, char *traddr)
1980 {
1981     struct nvmet_rdma_port *port = nport->priv;
1982     struct rdma_cm_id *cm_id = port->cm_id;
1983 
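    /*
     * If the port listens on a wildcard address, report the address
     * this particular connection arrived on instead of the
     * configured traddr.
     */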
1984     if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1985         struct nvmet_rdma_rsp *rsp =
1986             container_of(req, struct nvmet_rdma_rsp, req);
1987         struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
1988         struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
1989 
1990         sprintf(traddr, "%pISc", addr);
1991     } else {
1992         memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1993     }
1994 }
1995 
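/*
 * MDTS is reported in units of 2^n * MPSMIN; assuming a 4KB MPSMIN
 * these values allow 1MB transfers (2^8 pages), or 128KB (2^5 pages)
 * when end-to-end protection information is enabled.
 */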
1996 static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
1997 {
1998     if (ctrl->pi_support)
1999         return NVMET_RDMA_MAX_METADATA_MDTS;
2000     return NVMET_RDMA_MAX_MDTS;
2001 }
2002 
2003 static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
2004 {
2005     return NVME_RDMA_MAX_QUEUE_SIZE;
2006 }
2007 
2008 static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
2009     .owner          = THIS_MODULE,
2010     .type           = NVMF_TRTYPE_RDMA,
2011     .msdbd          = 1,
2012     .flags          = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
2013     .add_port       = nvmet_rdma_add_port,
2014     .remove_port        = nvmet_rdma_remove_port,
2015     .queue_response     = nvmet_rdma_queue_response,
2016     .delete_ctrl        = nvmet_rdma_delete_ctrl,
2017     .disc_traddr        = nvmet_rdma_disc_port_addr,
2018     .get_mdts       = nvmet_rdma_get_mdts,
2019     .get_max_queue_size = nvmet_rdma_get_max_queue_size,
2020 };
2021 
2022 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2023 {
2024     struct nvmet_rdma_queue *queue, *tmp;
2025     struct nvmet_rdma_device *ndev;
2026     bool found = false;
2027 
2028     mutex_lock(&device_list_mutex);
2029     list_for_each_entry(ndev, &device_list, entry) {
2030         if (ndev->device == ib_device) {
2031             found = true;
2032             break;
2033         }
2034     }
2035     mutex_unlock(&device_list_mutex);
2036 
2037     if (!found)
2038         return;
2039 
2040     /*
2041      * An IB device used by nvmet controllers is being removed;
2042      * delete all queues that use this device.
2043      */
2044     mutex_lock(&nvmet_rdma_queue_mutex);
2045     list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
2046                  queue_list) {
2047         if (queue->dev->device != ib_device)
2048             continue;
2049 
2050         pr_info("Removing queue %d\n", queue->idx);
2051         list_del_init(&queue->queue_list);
2052         __nvmet_rdma_queue_disconnect(queue);
2053     }
2054     mutex_unlock(&nvmet_rdma_queue_mutex);
2055 
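    /*
     * Flush nvmet_wq so that the release work queued for the
     * disconnected queues completes before the device removal
     * callback returns.
     */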
2056     flush_workqueue(nvmet_wq);
2057 }
2058 
2059 static struct ib_client nvmet_rdma_ib_client = {
2060     .name   = "nvmet_rdma",
2061     .remove = nvmet_rdma_remove_one
2062 };
2063 
2064 static int __init nvmet_rdma_init(void)
2065 {
2066     int ret;
2067 
2068     ret = ib_register_client(&nvmet_rdma_ib_client);
2069     if (ret)
2070         return ret;
2071 
2072     ret = nvmet_register_transport(&nvmet_rdma_ops);
2073     if (ret)
2074         goto err_ib_client;
2075 
2076     return 0;
2077 
2078 err_ib_client:
2079     ib_unregister_client(&nvmet_rdma_ib_client);
2080     return ret;
2081 }
2082 
2083 static void __exit nvmet_rdma_exit(void)
2084 {
2085     nvmet_unregister_transport(&nvmet_rdma_ops);
2086     ib_unregister_client(&nvmet_rdma_ib_client);
2087     WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
2088     ida_destroy(&nvmet_rdma_queue_ida);
2089 }
2090 
2091 module_init(nvmet_rdma_init);
2092 module_exit(nvmet_rdma_exit);
2093 
2094 MODULE_LICENSE("GPL v2");
2095 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */