0001
0002
0003
0004
0005 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0006 #include <linux/module.h>
0007 #include <linux/slab.h>
0008 #include <linux/blk-mq.h>
0009 #include <linux/parser.h>
0010 #include <linux/random.h>
0011 #include <uapi/scsi/fc/fc_fs.h>
0012 #include <uapi/scsi/fc/fc_els.h>
0013
0014 #include "nvmet.h"
0015 #include <linux/nvme-fc-driver.h>
0016 #include <linux/nvme-fc.h>
0017 #include "../host/fc.h"
0018
0019
0020
0021
0022
0023 #define NVMET_LS_CTX_COUNT 256
0024
0025 struct nvmet_fc_tgtport;
0026 struct nvmet_fc_tgt_assoc;
0027
0028 struct nvmet_fc_ls_iod {
0029 struct nvmefc_ls_rsp *lsrsp;
0030 struct nvmefc_tgt_fcp_req *fcpreq;
0031
0032 struct list_head ls_rcv_list;
0033
0034 struct nvmet_fc_tgtport *tgtport;
0035 struct nvmet_fc_tgt_assoc *assoc;
0036 void *hosthandle;
0037
0038 union nvmefc_ls_requests *rqstbuf;
0039 union nvmefc_ls_responses *rspbuf;
0040 u16 rqstdatalen;
0041 dma_addr_t rspdma;
0042
0043 struct scatterlist sg[2];
0044
0045 struct work_struct work;
0046 } __aligned(sizeof(unsigned long long));
0047
0048 struct nvmet_fc_ls_req_op {
0049 struct nvmefc_ls_req ls_req;
0050
0051 struct nvmet_fc_tgtport *tgtport;
0052 void *hosthandle;
0053
0054 int ls_error;
0055 struct list_head lsreq_list;
0056 bool req_queued;
0057 };
0058
0059
0060
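/* desired maximum for a single sequence - if the sg list allows it */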
0061 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
0062
0063 enum nvmet_fcp_datadir {
0064 NVMET_FCP_NODATA,
0065 NVMET_FCP_WRITE,
0066 NVMET_FCP_READ,
0067 NVMET_FCP_ABORTED,
0068 };
0069
0070 struct nvmet_fc_fcp_iod {
0071 struct nvmefc_tgt_fcp_req *fcpreq;
0072
0073 struct nvme_fc_cmd_iu cmdiubuf;
0074 struct nvme_fc_ersp_iu rspiubuf;
0075 dma_addr_t rspdma;
0076 struct scatterlist *next_sg;
0077 struct scatterlist *data_sg;
0078 int data_sg_cnt;
0079 u32 offset;
0080 enum nvmet_fcp_datadir io_dir;
0081 bool active;
0082 bool abort;
0083 bool aborted;
0084 bool writedataactive;
0085 spinlock_t flock;
0086
0087 struct nvmet_req req;
0088 struct work_struct defer_work;
0089
0090 struct nvmet_fc_tgtport *tgtport;
0091 struct nvmet_fc_tgt_queue *queue;
0092
0093 struct list_head fcp_list;
0094 };
0095
0096 struct nvmet_fc_tgtport {
0097 struct nvmet_fc_target_port fc_target_port;
0098
0099 struct list_head tgt_list;
0100 struct device *dev;
0101 struct nvmet_fc_target_template *ops;
0102
0103 struct nvmet_fc_ls_iod *iod;
0104 spinlock_t lock;
0105 struct list_head ls_rcv_list;
0106 struct list_head ls_req_list;
0107 struct list_head ls_busylist;
0108 struct list_head assoc_list;
0109 struct list_head host_list;
0110 struct ida assoc_cnt;
0111 struct nvmet_fc_port_entry *pe;
0112 struct kref ref;
0113 u32 max_sg_cnt;
0114 };
0115
0116 struct nvmet_fc_port_entry {
0117 struct nvmet_fc_tgtport *tgtport;
0118 struct nvmet_port *port;
0119 u64 node_name;
0120 u64 port_name;
0121 struct list_head pe_list;
0122 };
0123
0124 struct nvmet_fc_defer_fcp_req {
0125 struct list_head req_list;
0126 struct nvmefc_tgt_fcp_req *fcp_req;
0127 };
0128
0129 struct nvmet_fc_tgt_queue {
0130 bool ninetypercent;
0131 u16 qid;
0132 u16 sqsize;
0133 u16 ersp_ratio;
0134 __le16 sqhd;
0135 atomic_t connected;
0136 atomic_t sqtail;
0137 atomic_t zrspcnt;
0138 atomic_t rsn;
0139 spinlock_t qlock;
0140 struct nvmet_cq nvme_cq;
0141 struct nvmet_sq nvme_sq;
0142 struct nvmet_fc_tgt_assoc *assoc;
0143 struct list_head fod_list;
0144 struct list_head pending_cmd_list;
0145 struct list_head avail_defer_list;
0146 struct workqueue_struct *work_q;
0147 struct kref ref;
0148 struct rcu_head rcu;
0149 struct nvmet_fc_fcp_iod fod[];
0150 } __aligned(sizeof(unsigned long long));
0151
0152 struct nvmet_fc_hostport {
0153 struct nvmet_fc_tgtport *tgtport;
0154 void *hosthandle;
0155 struct list_head host_list;
0156 struct kref ref;
0157 u8 invalid;
0158 };
0159
0160 struct nvmet_fc_tgt_assoc {
0161 u64 association_id;
0162 u32 a_id;
0163 atomic_t terminating;
0164 struct nvmet_fc_tgtport *tgtport;
0165 struct nvmet_fc_hostport *hostport;
0166 struct nvmet_fc_ls_iod *rcv_disconn;
0167 struct list_head a_list;
0168 struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
0169 struct kref ref;
0170 struct work_struct del_work;
0171 struct rcu_head rcu;
0172 };
0173
0174
0175 static inline int
0176 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
0177 {
0178 return (iodptr - iodptr->tgtport->iod);
0179 }
0180
0181 static inline int
0182 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
0183 {
0184 return (fodptr - fodptr->queue->fod);
0185 }
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198 #define BYTES_FOR_QID sizeof(u16)
0199 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
0200 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
0201
0202 static inline u64
0203 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
0204 {
0205 return (assoc->association_id | qid);
0206 }
0207
0208 static inline u64
0209 nvmet_fc_getassociationid(u64 connectionid)
0210 {
0211 return connectionid & ~NVMET_FC_QUEUEID_MASK;
0212 }
0213
0214 static inline u16
0215 nvmet_fc_getqueueid(u64 connectionid)
0216 {
0217 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
0218 }
0219
0220 static inline struct nvmet_fc_tgtport *
0221 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
0222 {
0223 return container_of(targetport, struct nvmet_fc_tgtport,
0224 fc_target_port);
0225 }
0226
0227 static inline struct nvmet_fc_fcp_iod *
0228 nvmet_req_to_fod(struct nvmet_req *nvme_req)
0229 {
0230 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
0231 }
0232
0233
0234
0235
0236
0237 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
0238
0239 static LIST_HEAD(nvmet_fc_target_list);
0240 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
0241 static LIST_HEAD(nvmet_fc_portentry_list);
0242
0243
0244 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
0245 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
0246 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
0247 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
0248 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
0249 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
0250 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
0251 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
0252 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
0253 struct nvmet_fc_fcp_iod *fod);
0254 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
0255 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
0256 struct nvmet_fc_ls_iod *iod);
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277 static inline dma_addr_t
0278 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
0279 enum dma_data_direction dir)
0280 {
0281 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
0282 }
0283
0284 static inline int
0285 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
0286 {
0287 return dev ? dma_mapping_error(dev, dma_addr) : 0;
0288 }
0289
0290 static inline void
0291 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
0292 enum dma_data_direction dir)
0293 {
0294 if (dev)
0295 dma_unmap_single(dev, addr, size, dir);
0296 }
0297
0298 static inline void
0299 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
0300 enum dma_data_direction dir)
0301 {
0302 if (dev)
0303 dma_sync_single_for_cpu(dev, addr, size, dir);
0304 }
0305
0306 static inline void
0307 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
0308 enum dma_data_direction dir)
0309 {
0310 if (dev)
0311 dma_sync_single_for_device(dev, addr, size, dir);
0312 }
0313
0314
0315 static int
0316 fc_map_sg(struct scatterlist *sg, int nents)
0317 {
0318 struct scatterlist *s;
0319 int i;
0320
0321 WARN_ON(nents == 0 || sg[0].length == 0);
0322
0323 for_each_sg(sg, s, nents, i) {
0324 s->dma_address = 0L;
0325 #ifdef CONFIG_NEED_SG_DMA_LENGTH
0326 s->dma_length = s->length;
0327 #endif
0328 }
0329 return nents;
0330 }
0331
0332 static inline int
0333 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
0334 enum dma_data_direction dir)
0335 {
0336 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
0337 }
0338
0339 static inline void
0340 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
0341 enum dma_data_direction dir)
0342 {
0343 if (dev)
0344 dma_unmap_sg(dev, sg, nents, dir);
0345 }
0346
0347
0348
0349
0350
0351 static void
0352 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
0353 {
0354 struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
0355 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
0356 unsigned long flags;
0357
0358 spin_lock_irqsave(&tgtport->lock, flags);
0359
0360 if (!lsop->req_queued) {
0361 spin_unlock_irqrestore(&tgtport->lock, flags);
0362 return;
0363 }
0364
0365 list_del(&lsop->lsreq_list);
0366
0367 lsop->req_queued = false;
0368
0369 spin_unlock_irqrestore(&tgtport->lock, flags);
0370
0371 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
0372 (lsreq->rqstlen + lsreq->rsplen),
0373 DMA_BIDIRECTIONAL);
0374
0375 nvmet_fc_tgtport_put(tgtport);
0376 }
0377
0378 static int
0379 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
0380 struct nvmet_fc_ls_req_op *lsop,
0381 void (*done)(struct nvmefc_ls_req *req, int status))
0382 {
0383 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
0384 unsigned long flags;
0385 int ret = 0;
0386
0387 if (!tgtport->ops->ls_req)
0388 return -EOPNOTSUPP;
0389
0390 if (!nvmet_fc_tgtport_get(tgtport))
0391 return -ESHUTDOWN;
0392
0393 lsreq->done = done;
0394 lsop->req_queued = false;
0395 INIT_LIST_HEAD(&lsop->lsreq_list);
0396
0397 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
0398 lsreq->rqstlen + lsreq->rsplen,
0399 DMA_BIDIRECTIONAL);
0400 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
0401 ret = -EFAULT;
0402 goto out_puttgtport;
0403 }
0404 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
0405
0406 spin_lock_irqsave(&tgtport->lock, flags);
0407
0408 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
0409
0410 lsop->req_queued = true;
0411
0412 spin_unlock_irqrestore(&tgtport->lock, flags);
0413
0414 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
0415 lsreq);
0416 if (ret)
0417 goto out_unlink;
0418
0419 return 0;
0420
0421 out_unlink:
0422 lsop->ls_error = ret;
0423 spin_lock_irqsave(&tgtport->lock, flags);
0424 lsop->req_queued = false;
0425 list_del(&lsop->lsreq_list);
0426 spin_unlock_irqrestore(&tgtport->lock, flags);
0427 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
0428 (lsreq->rqstlen + lsreq->rsplen),
0429 DMA_BIDIRECTIONAL);
0430 out_puttgtport:
0431 nvmet_fc_tgtport_put(tgtport);
0432
0433 return ret;
0434 }
0435
0436 static int
0437 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
0438 struct nvmet_fc_ls_req_op *lsop,
0439 void (*done)(struct nvmefc_ls_req *req, int status))
0440 {
0441
0442
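	/* don't wait for completion - the done() callback handles it */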
0443 return __nvmet_fc_send_ls_req(tgtport, lsop, done);
0444 }
0445
0446 static void
0447 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
0448 {
0449 struct nvmet_fc_ls_req_op *lsop =
0450 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
0451
0452 __nvmet_fc_finish_ls_req(lsop);
0453
0454
0455
0456 kfree(lsop);
0457 }
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
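/*
 * Send a FC-NVME Disconnect Association LS to terminate the association.
 * Terminating the association also terminates the per-queue FC-NVME
 * connections that are part of it. The LS is sent asynchronously and its
 * result is ignored: the target tears down its view of the association
 * regardless, as the host may no longer be reachable.
 */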
0476 static void
0477 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
0478 {
0479 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
0480 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
0481 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
0482 struct nvmet_fc_ls_req_op *lsop;
0483 struct nvmefc_ls_req *lsreq;
0484 int ret;
0485
0486
0487
0488
0489
0490
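	/*
	 * If the LLDD does not support ls_req, or there is no hostport
	 * (or it has already been invalidated by the LLDD), there is no
	 * one to send the Disconnect LS to - skip sending it.
	 */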
0491 if (!tgtport->ops->ls_req || !assoc->hostport ||
0492 assoc->hostport->invalid)
0493 return;
0494
0495 lsop = kzalloc((sizeof(*lsop) +
0496 sizeof(*discon_rqst) + sizeof(*discon_acc) +
0497 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
0498 if (!lsop) {
0499 dev_info(tgtport->dev,
0500 "{%d:%d} send Disconnect Association failed: ENOMEM\n",
0501 tgtport->fc_target_port.port_num, assoc->a_id);
0502 return;
0503 }
0504
0505 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
0506 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
0507 lsreq = &lsop->ls_req;
0508 if (tgtport->ops->lsrqst_priv_sz)
0509 lsreq->private = (void *)&discon_acc[1];
0510 else
0511 lsreq->private = NULL;
0512
0513 lsop->tgtport = tgtport;
0514 lsop->hosthandle = assoc->hostport->hosthandle;
0515
0516 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
0517 assoc->association_id);
0518
0519 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
0520 nvmet_fc_disconnect_assoc_done);
0521 if (ret) {
0522 dev_info(tgtport->dev,
0523 "{%d:%d} XMT Disconnect Association failed: %d\n",
0524 tgtport->fc_target_port.port_num, assoc->a_id, ret);
0525 kfree(lsop);
0526 }
0527 }
0528
0529
0530
0531
0532
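/*
 * Allocate and DMA-map the pool of LS receive contexts (iods) used by a
 * targetport to service incoming FC-NVME LS requests.
 */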
0533 static int
0534 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
0535 {
0536 struct nvmet_fc_ls_iod *iod;
0537 int i;
0538
0539 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
0540 GFP_KERNEL);
0541 if (!iod)
0542 return -ENOMEM;
0543
0544 tgtport->iod = iod;
0545
0546 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
0547 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
0548 iod->tgtport = tgtport;
0549 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
0550
0551 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
0552 sizeof(union nvmefc_ls_responses),
0553 GFP_KERNEL);
0554 if (!iod->rqstbuf)
0555 goto out_fail;
0556
0557 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
0558
0559 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
0560 sizeof(*iod->rspbuf),
0561 DMA_TO_DEVICE);
0562 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
0563 goto out_fail;
0564 }
0565
0566 return 0;
0567
0568 out_fail:
0569 kfree(iod->rqstbuf);
0570 list_del(&iod->ls_rcv_list);
0571 for (iod--, i--; i >= 0; iod--, i--) {
0572 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
0573 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
0574 kfree(iod->rqstbuf);
0575 list_del(&iod->ls_rcv_list);
0576 }
0577
0578 kfree(iod);
0579
0580 return -EFAULT;
0581 }
0582
0583 static void
0584 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
0585 {
0586 struct nvmet_fc_ls_iod *iod = tgtport->iod;
0587 int i;
0588
0589 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
0590 fc_dma_unmap_single(tgtport->dev,
0591 iod->rspdma, sizeof(*iod->rspbuf),
0592 DMA_TO_DEVICE);
0593 kfree(iod->rqstbuf);
0594 list_del(&iod->ls_rcv_list);
0595 }
0596 kfree(tgtport->iod);
0597 }
0598
0599 static struct nvmet_fc_ls_iod *
0600 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
0601 {
0602 struct nvmet_fc_ls_iod *iod;
0603 unsigned long flags;
0604
0605 spin_lock_irqsave(&tgtport->lock, flags);
0606 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
0607 struct nvmet_fc_ls_iod, ls_rcv_list);
0608 if (iod)
0609 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
0610 spin_unlock_irqrestore(&tgtport->lock, flags);
0611 return iod;
0612 }
0613
0614
0615 static void
0616 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
0617 struct nvmet_fc_ls_iod *iod)
0618 {
0619 unsigned long flags;
0620
0621 spin_lock_irqsave(&tgtport->lock, flags);
0622 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
0623 spin_unlock_irqrestore(&tgtport->lock, flags);
0624 }
0625
0626 static void
0627 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
0628 struct nvmet_fc_tgt_queue *queue)
0629 {
0630 struct nvmet_fc_fcp_iod *fod = queue->fod;
0631 int i;
0632
0633 for (i = 0; i < queue->sqsize; fod++, i++) {
0634 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
0635 fod->tgtport = tgtport;
0636 fod->queue = queue;
0637 fod->active = false;
0638 fod->abort = false;
0639 fod->aborted = false;
0640 fod->fcpreq = NULL;
0641 list_add_tail(&fod->fcp_list, &queue->fod_list);
0642 spin_lock_init(&fod->flock);
0643
0644 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
0645 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
0646 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
0647 list_del(&fod->fcp_list);
0648 for (fod--, i--; i >= 0; fod--, i--) {
0649 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
0650 sizeof(fod->rspiubuf),
0651 DMA_TO_DEVICE);
0652 fod->rspdma = 0L;
0653 list_del(&fod->fcp_list);
0654 }
0655
0656 return;
0657 }
0658 }
0659 }
0660
0661 static void
0662 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
0663 struct nvmet_fc_tgt_queue *queue)
0664 {
0665 struct nvmet_fc_fcp_iod *fod = queue->fod;
0666 int i;
0667
0668 for (i = 0; i < queue->sqsize; fod++, i++) {
0669 if (fod->rspdma)
0670 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
0671 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
0672 }
0673 }
0674
0675 static struct nvmet_fc_fcp_iod *
0676 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
0677 {
0678 struct nvmet_fc_fcp_iod *fod;
0679
0680 lockdep_assert_held(&queue->qlock);
0681
0682 fod = list_first_entry_or_null(&queue->fod_list,
0683 struct nvmet_fc_fcp_iod, fcp_list);
0684 if (fod) {
0685 list_del(&fod->fcp_list);
0686 fod->active = true;
0687
0688
0689
0690
0691
0692 }
0693 return fod;
0694 }
0695
0696
0697 static void
0698 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
0699 struct nvmet_fc_tgt_queue *queue,
0700 struct nvmefc_tgt_fcp_req *fcpreq)
0701 {
0702 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
0703
0704
0705
0706
0707
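	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis.
	 */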
0708 fcpreq->hwqid = queue->qid ?
0709 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
0710
0711 nvmet_fc_handle_fcp_rqst(tgtport, fod);
0712 }
0713
0714 static void
0715 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
0716 {
0717 struct nvmet_fc_fcp_iod *fod =
0718 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
0719
0720
0721 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
0722
0723 }
0724
0725 static void
0726 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
0727 struct nvmet_fc_fcp_iod *fod)
0728 {
0729 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
0730 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
0731 struct nvmet_fc_defer_fcp_req *deferfcp;
0732 unsigned long flags;
0733
0734 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
0735 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
0736
0737 fcpreq->nvmet_fc_private = NULL;
0738
0739 fod->active = false;
0740 fod->abort = false;
0741 fod->aborted = false;
0742 fod->writedataactive = false;
0743 fod->fcpreq = NULL;
0744
0745 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
0746
0747
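	/* release the queue lookup reference on the completed IO */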
0748 nvmet_fc_tgt_q_put(queue);
0749
0750 spin_lock_irqsave(&queue->qlock, flags);
0751 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
0752 struct nvmet_fc_defer_fcp_req, req_list);
0753 if (!deferfcp) {
0754 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
0755 spin_unlock_irqrestore(&queue->qlock, flags);
0756 return;
0757 }
0758
0759
0760 list_del(&deferfcp->req_list);
0761
0762 fcpreq = deferfcp->fcp_req;
0763
0764
0765 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
0766
0767 spin_unlock_irqrestore(&queue->qlock, flags);
0768
0769
0770 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
0771
0772
0773 fcpreq->rspaddr = NULL;
0774 fcpreq->rsplen = 0;
0775 fcpreq->nvmet_fc_private = fod;
0776 fod->fcpreq = fcpreq;
0777 fod->active = true;
0778
0779
0780 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
0781
0782
0783
0784
0785
0786
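	/*
	 * Leave the queue lookup reference taken when the fod was
	 * originally allocated - the deferred command now owns it.
	 */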
0787 queue_work(queue->work_q, &fod->defer_work);
0788 }
0789
0790 static struct nvmet_fc_tgt_queue *
0791 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
0792 u16 qid, u16 sqsize)
0793 {
0794 struct nvmet_fc_tgt_queue *queue;
0795 int ret;
0796
0797 if (qid > NVMET_NR_QUEUES)
0798 return NULL;
0799
0800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
0801 if (!queue)
0802 return NULL;
0803
0804 if (!nvmet_fc_tgt_a_get(assoc))
0805 goto out_free_queue;
0806
0807 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
0808 assoc->tgtport->fc_target_port.port_num,
0809 assoc->a_id, qid);
0810 if (!queue->work_q)
0811 goto out_a_put;
0812
0813 queue->qid = qid;
0814 queue->sqsize = sqsize;
0815 queue->assoc = assoc;
0816 INIT_LIST_HEAD(&queue->fod_list);
0817 INIT_LIST_HEAD(&queue->avail_defer_list);
0818 INIT_LIST_HEAD(&queue->pending_cmd_list);
0819 atomic_set(&queue->connected, 0);
0820 atomic_set(&queue->sqtail, 0);
0821 atomic_set(&queue->rsn, 1);
0822 atomic_set(&queue->zrspcnt, 0);
0823 spin_lock_init(&queue->qlock);
0824 kref_init(&queue->ref);
0825
0826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
0827
0828 ret = nvmet_sq_init(&queue->nvme_sq);
0829 if (ret)
0830 goto out_fail_iodlist;
0831
0832 WARN_ON(assoc->queues[qid]);
0833 rcu_assign_pointer(assoc->queues[qid], queue);
0834
0835 return queue;
0836
0837 out_fail_iodlist:
0838 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
0839 destroy_workqueue(queue->work_q);
0840 out_a_put:
0841 nvmet_fc_tgt_a_put(assoc);
0842 out_free_queue:
0843 kfree(queue);
0844 return NULL;
0845 }
0846
0847
0848 static void
0849 nvmet_fc_tgt_queue_free(struct kref *ref)
0850 {
0851 struct nvmet_fc_tgt_queue *queue =
0852 container_of(ref, struct nvmet_fc_tgt_queue, ref);
0853
0854 rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
0855
0856 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
0857
0858 nvmet_fc_tgt_a_put(queue->assoc);
0859
0860 destroy_workqueue(queue->work_q);
0861
0862 kfree_rcu(queue, rcu);
0863 }
0864
0865 static void
0866 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
0867 {
0868 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
0869 }
0870
0871 static int
0872 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
0873 {
0874 return kref_get_unless_zero(&queue->ref);
0875 }
0876
0877
0878 static void
0879 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
0880 {
0881 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
0882 struct nvmet_fc_fcp_iod *fod = queue->fod;
0883 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
0884 unsigned long flags;
0885 int i;
0886 bool disconnect;
0887
0888 disconnect = atomic_xchg(&queue->connected, 0);
0889
0890
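	/* if not connected, nothing to do */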
0891 if (!disconnect)
0892 return;
0893
0894 spin_lock_irqsave(&queue->qlock, flags);
0895
0896 for (i = 0; i < queue->sqsize; fod++, i++) {
0897 if (fod->active) {
0898 spin_lock(&fod->flock);
0899 fod->abort = true;
0900
0901
0902
0903
0904
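			/*
			 * only call the LLDD abort routine if waiting for
			 * write data. Other outstanding ops will see the
			 * abort flag and terminate on their own.
			 */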
0905 if (fod->writedataactive) {
0906 fod->aborted = true;
0907 spin_unlock(&fod->flock);
0908 tgtport->ops->fcp_abort(
0909 &tgtport->fc_target_port, fod->fcpreq);
0910 } else
0911 spin_unlock(&fod->flock);
0912 }
0913 }
0914
0915
0916 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
0917 req_list) {
0918 list_del(&deferfcp->req_list);
0919 kfree(deferfcp);
0920 }
0921
0922 for (;;) {
0923 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
0924 struct nvmet_fc_defer_fcp_req, req_list);
0925 if (!deferfcp)
0926 break;
0927
0928 list_del(&deferfcp->req_list);
0929 spin_unlock_irqrestore(&queue->qlock, flags);
0930
0931 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
0932 deferfcp->fcp_req);
0933
0934 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
0935 deferfcp->fcp_req);
0936
0937 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
0938 deferfcp->fcp_req);
0939
0940
0941 nvmet_fc_tgt_q_put(queue);
0942
0943 kfree(deferfcp);
0944
0945 spin_lock_irqsave(&queue->qlock, flags);
0946 }
0947 spin_unlock_irqrestore(&queue->qlock, flags);
0948
0949 flush_workqueue(queue->work_q);
0950
0951 nvmet_sq_destroy(&queue->nvme_sq);
0952
0953 nvmet_fc_tgt_q_put(queue);
0954 }
0955
0956 static struct nvmet_fc_tgt_queue *
0957 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
0958 u64 connection_id)
0959 {
0960 struct nvmet_fc_tgt_assoc *assoc;
0961 struct nvmet_fc_tgt_queue *queue;
0962 u64 association_id = nvmet_fc_getassociationid(connection_id);
0963 u16 qid = nvmet_fc_getqueueid(connection_id);
0964
0965 if (qid > NVMET_NR_QUEUES)
0966 return NULL;
0967
0968 rcu_read_lock();
0969 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
0970 if (association_id == assoc->association_id) {
0971 queue = rcu_dereference(assoc->queues[qid]);
0972 if (queue &&
0973 (!atomic_read(&queue->connected) ||
0974 !nvmet_fc_tgt_q_get(queue)))
0975 queue = NULL;
0976 rcu_read_unlock();
0977 return queue;
0978 }
0979 }
0980 rcu_read_unlock();
0981 return NULL;
0982 }
0983
0984 static void
0985 nvmet_fc_hostport_free(struct kref *ref)
0986 {
0987 struct nvmet_fc_hostport *hostport =
0988 container_of(ref, struct nvmet_fc_hostport, ref);
0989 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
0990 unsigned long flags;
0991
0992 spin_lock_irqsave(&tgtport->lock, flags);
0993 list_del(&hostport->host_list);
0994 spin_unlock_irqrestore(&tgtport->lock, flags);
0995 if (tgtport->ops->host_release && hostport->invalid)
0996 tgtport->ops->host_release(hostport->hosthandle);
0997 kfree(hostport);
0998 nvmet_fc_tgtport_put(tgtport);
0999 }
1000
1001 static void
1002 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
1003 {
1004 kref_put(&hostport->ref, nvmet_fc_hostport_free);
1005 }
1006
1007 static int
1008 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
1009 {
1010 return kref_get_unless_zero(&hostport->ref);
1011 }
1012
1013 static void
1014 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
1015 {
1016
1017 if (!hostport || !hostport->hosthandle)
1018 return;
1019
1020 nvmet_fc_hostport_put(hostport);
1021 }
1022
1023 static struct nvmet_fc_hostport *
1024 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1025 {
1026 struct nvmet_fc_hostport *host;
1027
1028 lockdep_assert_held(&tgtport->lock);
1029
1030 list_for_each_entry(host, &tgtport->host_list, host_list) {
1031 if (host->hosthandle == hosthandle && !host->invalid) {
1032 if (nvmet_fc_hostport_get(host))
1033 return (host);
1034 }
1035 }
1036
1037 return NULL;
1038 }
1039
1040 static struct nvmet_fc_hostport *
1041 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1042 {
1043 struct nvmet_fc_hostport *newhost, *match = NULL;
1044 unsigned long flags;
1045
1046
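	/* if the LLDD doesn't supply a hosthandle, no hostport is tracked */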
1047 if (!hosthandle)
1048 return NULL;
1049
1050
1051
1052
1053
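	/*
	 * take a reference on the targetport for what may become a newly
	 * allocated hostport; it is dropped again if an existing hostport
	 * is matched or the allocation fails.
	 */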
1054 if (!nvmet_fc_tgtport_get(tgtport))
1055 return ERR_PTR(-EINVAL);
1056
1057 spin_lock_irqsave(&tgtport->lock, flags);
1058 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1059 spin_unlock_irqrestore(&tgtport->lock, flags);
1060
1061 if (match) {
1062
1063 nvmet_fc_tgtport_put(tgtport);
1064 return match;
1065 }
1066
1067 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1068 if (!newhost) {
1069
1070 nvmet_fc_tgtport_put(tgtport);
1071 return ERR_PTR(-ENOMEM);
1072 }
1073
1074 spin_lock_irqsave(&tgtport->lock, flags);
1075 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1076 if (match) {
1077
1078 kfree(newhost);
1079 newhost = match;
1080
1081 nvmet_fc_tgtport_put(tgtport);
1082 } else {
1083 newhost->tgtport = tgtport;
1084 newhost->hosthandle = hosthandle;
1085 INIT_LIST_HEAD(&newhost->host_list);
1086 kref_init(&newhost->ref);
1087
1088 list_add_tail(&newhost->host_list, &tgtport->host_list);
1089 }
1090 spin_unlock_irqrestore(&tgtport->lock, flags);
1091
1092 return newhost;
1093 }
1094
1095 static void
1096 nvmet_fc_delete_assoc(struct work_struct *work)
1097 {
1098 struct nvmet_fc_tgt_assoc *assoc =
1099 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1100
1101 nvmet_fc_delete_target_assoc(assoc);
1102 nvmet_fc_tgt_a_put(assoc);
1103 }
1104
1105 static struct nvmet_fc_tgt_assoc *
1106 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1107 {
1108 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1109 unsigned long flags;
1110 u64 ran;
1111 int idx;
1112 bool needrandom = true;
1113
1114 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1115 if (!assoc)
1116 return NULL;
1117
1118 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
1119 if (idx < 0)
1120 goto out_free_assoc;
1121
1122 if (!nvmet_fc_tgtport_get(tgtport))
1123 goto out_ida;
1124
1125 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1126 if (IS_ERR(assoc->hostport))
1127 goto out_put;
1128
1129 assoc->tgtport = tgtport;
1130 assoc->a_id = idx;
1131 INIT_LIST_HEAD(&assoc->a_list);
1132 kref_init(&assoc->ref);
1133 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
1134 atomic_set(&assoc->terminating, 0);
1135
1136 while (needrandom) {
1137 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1138 ran = ran << BYTES_FOR_QID_SHIFT;
1139
1140 spin_lock_irqsave(&tgtport->lock, flags);
1141 needrandom = false;
1142 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1143 if (ran == tmpassoc->association_id) {
1144 needrandom = true;
1145 break;
1146 }
1147 }
1148 if (!needrandom) {
1149 assoc->association_id = ran;
1150 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1151 }
1152 spin_unlock_irqrestore(&tgtport->lock, flags);
1153 }
1154
1155 return assoc;
1156
1157 out_put:
1158 nvmet_fc_tgtport_put(tgtport);
1159 out_ida:
1160 ida_free(&tgtport->assoc_cnt, idx);
1161 out_free_assoc:
1162 kfree(assoc);
1163 return NULL;
1164 }
1165
1166 static void
1167 nvmet_fc_target_assoc_free(struct kref *ref)
1168 {
1169 struct nvmet_fc_tgt_assoc *assoc =
1170 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1171 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1172 struct nvmet_fc_ls_iod *oldls;
1173 unsigned long flags;
1174
1175
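	/* send a Disconnect Association LS now that the association is going away */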
1176 nvmet_fc_xmt_disconnect_assoc(assoc);
1177
1178 nvmet_fc_free_hostport(assoc->hostport);
1179 spin_lock_irqsave(&tgtport->lock, flags);
1180 list_del_rcu(&assoc->a_list);
1181 oldls = assoc->rcv_disconn;
1182 spin_unlock_irqrestore(&tgtport->lock, flags);
1183
1184 if (oldls)
1185 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1186 ida_free(&tgtport->assoc_cnt, assoc->a_id);
1187 dev_info(tgtport->dev,
1188 "{%d:%d} Association freed\n",
1189 tgtport->fc_target_port.port_num, assoc->a_id);
1190 kfree_rcu(assoc, rcu);
1191 nvmet_fc_tgtport_put(tgtport);
1192 }
1193
1194 static void
1195 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1196 {
1197 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1198 }
1199
1200 static int
1201 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1202 {
1203 return kref_get_unless_zero(&assoc->ref);
1204 }
1205
1206 static void
1207 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1208 {
1209 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1210 struct nvmet_fc_tgt_queue *queue;
1211 int i, terminating;
1212
1213 terminating = atomic_xchg(&assoc->terminating, 1);
1214
1215
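	/* if already terminating, do nothing */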
1216 if (terminating)
1217 return;
1218
1219
1220 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1221 rcu_read_lock();
1222 queue = rcu_dereference(assoc->queues[i]);
1223 if (!queue) {
1224 rcu_read_unlock();
1225 continue;
1226 }
1227
1228 if (!nvmet_fc_tgt_q_get(queue)) {
1229 rcu_read_unlock();
1230 continue;
1231 }
1232 rcu_read_unlock();
1233 nvmet_fc_delete_target_queue(queue);
1234 nvmet_fc_tgt_q_put(queue);
1235 }
1236
1237 dev_info(tgtport->dev,
1238 "{%d:%d} Association deleted\n",
1239 tgtport->fc_target_port.port_num, assoc->a_id);
1240
1241 nvmet_fc_tgt_a_put(assoc);
1242 }
1243
1244 static struct nvmet_fc_tgt_assoc *
1245 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1246 u64 association_id)
1247 {
1248 struct nvmet_fc_tgt_assoc *assoc;
1249 struct nvmet_fc_tgt_assoc *ret = NULL;
1250
1251 rcu_read_lock();
1252 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1253 if (association_id == assoc->association_id) {
1254 ret = assoc;
1255 if (!nvmet_fc_tgt_a_get(assoc))
1256 ret = NULL;
1257 break;
1258 }
1259 }
1260 rcu_read_unlock();
1261
1262 return ret;
1263 }
1264
1265 static void
1266 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1267 struct nvmet_fc_port_entry *pe,
1268 struct nvmet_port *port)
1269 {
1270 lockdep_assert_held(&nvmet_fc_tgtlock);
1271
1272 pe->tgtport = tgtport;
1273 tgtport->pe = pe;
1274
1275 pe->port = port;
1276 port->priv = pe;
1277
1278 pe->node_name = tgtport->fc_target_port.node_name;
1279 pe->port_name = tgtport->fc_target_port.port_name;
1280 INIT_LIST_HEAD(&pe->pe_list);
1281
1282 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1283 }
1284
1285 static void
1286 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1287 {
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1291 if (pe->tgtport)
1292 pe->tgtport->pe = NULL;
1293 list_del(&pe->pe_list);
1294 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1295 }
1296
1297
1298
1299
1300
1301
1302 static void
1303 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1304 {
1305 struct nvmet_fc_port_entry *pe;
1306 unsigned long flags;
1307
1308 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1309 pe = tgtport->pe;
1310 if (pe)
1311 pe->tgtport = NULL;
1312 tgtport->pe = NULL;
1313 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1314 }
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324 static void
1325 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1326 {
1327 struct nvmet_fc_port_entry *pe;
1328 unsigned long flags;
1329
1330 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1331 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1332 if (tgtport->fc_target_port.node_name == pe->node_name &&
1333 tgtport->fc_target_port.port_name == pe->port_name) {
1334 WARN_ON(pe->tgtport);
1335 tgtport->pe = pe;
1336 pe->tgtport = tgtport;
1337 break;
1338 }
1339 }
1340 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
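/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node the port corresponds to; will
 *             be used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             allocates a nvmet_fc_target_port structure and places its
 *             address in the pointer. Upon failure, the pointer is set
 *             to NULL.
 *
 * Returns:
 * 0 upon success; a negative errno upon failure.
 */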
1360 int
1361 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1362 struct nvmet_fc_target_template *template,
1363 struct device *dev,
1364 struct nvmet_fc_target_port **portptr)
1365 {
1366 struct nvmet_fc_tgtport *newrec;
1367 unsigned long flags;
1368 int ret, idx;
1369
1370 if (!template->xmt_ls_rsp || !template->fcp_op ||
1371 !template->fcp_abort ||
1372 !template->fcp_req_release || !template->targetport_delete ||
1373 !template->max_hw_queues || !template->max_sgl_segments ||
1374 !template->max_dif_sgl_segments || !template->dma_boundary) {
1375 ret = -EINVAL;
1376 goto out_regtgt_failed;
1377 }
1378
1379 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1380 GFP_KERNEL);
1381 if (!newrec) {
1382 ret = -ENOMEM;
1383 goto out_regtgt_failed;
1384 }
1385
1386 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
1387 if (idx < 0) {
1388 ret = -ENOSPC;
1389 goto out_fail_kfree;
1390 }
1391
1392 if (!get_device(dev) && dev) {
1393 ret = -ENODEV;
1394 goto out_ida_put;
1395 }
1396
1397 newrec->fc_target_port.node_name = pinfo->node_name;
1398 newrec->fc_target_port.port_name = pinfo->port_name;
1399 if (template->target_priv_sz)
1400 newrec->fc_target_port.private = &newrec[1];
1401 else
1402 newrec->fc_target_port.private = NULL;
1403 newrec->fc_target_port.port_id = pinfo->port_id;
1404 newrec->fc_target_port.port_num = idx;
1405 INIT_LIST_HEAD(&newrec->tgt_list);
1406 newrec->dev = dev;
1407 newrec->ops = template;
1408 spin_lock_init(&newrec->lock);
1409 INIT_LIST_HEAD(&newrec->ls_rcv_list);
1410 INIT_LIST_HEAD(&newrec->ls_req_list);
1411 INIT_LIST_HEAD(&newrec->ls_busylist);
1412 INIT_LIST_HEAD(&newrec->assoc_list);
1413 INIT_LIST_HEAD(&newrec->host_list);
1414 kref_init(&newrec->ref);
1415 ida_init(&newrec->assoc_cnt);
1416 newrec->max_sg_cnt = template->max_sgl_segments;
1417
1418 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1419 if (ret) {
1420 ret = -ENOMEM;
1421 goto out_free_newrec;
1422 }
1423
1424 nvmet_fc_portentry_rebind_tgt(newrec);
1425
1426 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1427 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1428 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1429
1430 *portptr = &newrec->fc_target_port;
1431 return 0;
1432
1433 out_free_newrec:
1434 put_device(dev);
1435 out_ida_put:
1436 ida_free(&nvmet_fc_tgtport_cnt, idx);
1437 out_fail_kfree:
1438 kfree(newrec);
1439 out_regtgt_failed:
1440 *portptr = NULL;
1441 return ret;
1442 }
1443 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1444
1445
1446 static void
1447 nvmet_fc_free_tgtport(struct kref *ref)
1448 {
1449 struct nvmet_fc_tgtport *tgtport =
1450 container_of(ref, struct nvmet_fc_tgtport, ref);
1451 struct device *dev = tgtport->dev;
1452 unsigned long flags;
1453
1454 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1455 list_del(&tgtport->tgt_list);
1456 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1457
1458 nvmet_fc_free_ls_iodlist(tgtport);
1459
1460
1461 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1462
1463 ida_free(&nvmet_fc_tgtport_cnt,
1464 tgtport->fc_target_port.port_num);
1465
1466 ida_destroy(&tgtport->assoc_cnt);
1467
1468 kfree(tgtport);
1469
1470 put_device(dev);
1471 }
1472
1473 static void
1474 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1475 {
1476 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1477 }
1478
1479 static int
1480 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1481 {
1482 return kref_get_unless_zero(&tgtport->ref);
1483 }
1484
1485 static void
1486 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1487 {
1488 struct nvmet_fc_tgt_assoc *assoc;
1489
1490 rcu_read_lock();
1491 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1492 if (!nvmet_fc_tgt_a_get(assoc))
1493 continue;
1494 if (!queue_work(nvmet_wq, &assoc->del_work))
1495
1496 nvmet_fc_tgt_a_put(assoc);
1497 }
1498 rcu_read_unlock();
1499 }
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
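/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 * @target_port: pointer to the (registered) target port that prior LS's
 *               were received on and which supplied the hosthandle.
 * @hosthandle:  the handle that represents the host port that no longer
 *               has connectivity and to which LS's should no longer be
 *               directed.
 *
 * Marks the hostport of any association using the hosthandle as invalid
 * and schedules deletion of those associations. If no association was
 * found, the LLDD's host_release callback (if provided) is invoked
 * immediately.
 */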
1530 void
1531 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1532 void *hosthandle)
1533 {
1534 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1535 struct nvmet_fc_tgt_assoc *assoc, *next;
1536 unsigned long flags;
1537 bool noassoc = true;
1538
1539 spin_lock_irqsave(&tgtport->lock, flags);
1540 list_for_each_entry_safe(assoc, next,
1541 &tgtport->assoc_list, a_list) {
1542 if (!assoc->hostport ||
1543 assoc->hostport->hosthandle != hosthandle)
1544 continue;
1545 if (!nvmet_fc_tgt_a_get(assoc))
1546 continue;
1547 assoc->hostport->invalid = 1;
1548 noassoc = false;
1549 if (!queue_work(nvmet_wq, &assoc->del_work))
1550
1551 nvmet_fc_tgt_a_put(assoc);
1552 }
1553 spin_unlock_irqrestore(&tgtport->lock, flags);
1554
1555
1556 if (noassoc && tgtport->ops->host_release)
1557 tgtport->ops->host_release(hosthandle);
1558 }
1559 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1560
1561
1562
1563
1564 static void
1565 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1566 {
1567 struct nvmet_fc_tgtport *tgtport, *next;
1568 struct nvmet_fc_tgt_assoc *assoc;
1569 struct nvmet_fc_tgt_queue *queue;
1570 unsigned long flags;
1571 bool found_ctrl = false;
1572
1573
1574 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1575 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1576 tgt_list) {
1577 if (!nvmet_fc_tgtport_get(tgtport))
1578 continue;
1579 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1580
1581 rcu_read_lock();
1582 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1583 queue = rcu_dereference(assoc->queues[0]);
1584 if (queue && queue->nvme_sq.ctrl == ctrl) {
1585 if (nvmet_fc_tgt_a_get(assoc))
1586 found_ctrl = true;
1587 break;
1588 }
1589 }
1590 rcu_read_unlock();
1591
1592 nvmet_fc_tgtport_put(tgtport);
1593
1594 if (found_ctrl) {
1595 if (!queue_work(nvmet_wq, &assoc->del_work))
1596
1597 nvmet_fc_tgt_a_put(assoc);
1598 return;
1599 }
1600
1601 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1602 }
1603 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1604 }
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
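/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                      LLDD to deregister/remove a previously registered
 *                      local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port to be
 *               deregistered.
 *
 * Returns:
 * 0 upon success; a negative errno upon failure.
 */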
1617 int
1618 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1619 {
1620 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1621
1622 nvmet_fc_portentry_unbind_tgt(tgtport);
1623
1624
1625 __nvmet_fc_free_assocs(tgtport);
1626
1627
1628
1629
1630
1631
1632
1633
1634 nvmet_fc_tgtport_put(tgtport);
1635
1636 return 0;
1637 }
1638 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1639
1640
1641
1642
1643
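/*
 * Not shown in this excerpt: the LS validation error codes and the
 * validation_errors[] string table indexed by them (as well as the
 * FCNVME_LSDESC_CRA_* minimum-length defines used below). A minimal,
 * self-consistent sketch reconstructed from how the codes are used in
 * the routines that follow - the exact values and strings in the full
 * source may differ, and the table there also covers the Disconnect
 * validation codes returned by nvmefc_vldt_lsreq_discon_assoc():
 */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN,
	VERR_CR_ASSOC_RQST_LEN,
	VERR_CR_ASSOC_CMD,
	VERR_CR_ASSOC_CMD_LEN,
	VERR_ERSP_RATIO,
	VERR_ASSOC_ALLOC_FAIL,
	VERR_QUEUE_ALLOC_FAIL,
	VERR_CR_CONN_LEN,
	VERR_CR_CONN_RQST_LEN,
	VERR_ASSOC_ID,
	VERR_ASSOC_ID_LEN,
	VERR_CR_CONN_CMD,
	VERR_CR_CONN_CMD_LEN,
	VERR_NO_ASSOC,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"No Association",
};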
1644 static void
1645 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1646 struct nvmet_fc_ls_iod *iod)
1647 {
1648 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
1649 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1650 struct nvmet_fc_tgt_queue *queue;
1651 int ret = 0;
1652
1653 memset(acc, 0, sizeof(*acc));
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1664 ret = VERR_CR_ASSOC_LEN;
1665 else if (be32_to_cpu(rqst->desc_list_len) <
1666 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1667 ret = VERR_CR_ASSOC_RQST_LEN;
1668 else if (rqst->assoc_cmd.desc_tag !=
1669 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1670 ret = VERR_CR_ASSOC_CMD;
1671 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1672 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1673 ret = VERR_CR_ASSOC_CMD_LEN;
1674 else if (!rqst->assoc_cmd.ersp_ratio ||
1675 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1676 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1677 ret = VERR_ERSP_RATIO;
1678
1679 else {
1680
1681 iod->assoc = nvmet_fc_alloc_target_assoc(
1682 tgtport, iod->hosthandle);
1683 if (!iod->assoc)
1684 ret = VERR_ASSOC_ALLOC_FAIL;
1685 else {
1686 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1687 be16_to_cpu(rqst->assoc_cmd.sqsize));
1688 if (!queue)
1689 ret = VERR_QUEUE_ALLOC_FAIL;
1690 }
1691 }
1692
1693 if (ret) {
1694 dev_err(tgtport->dev,
1695 "Create Association LS failed: %s\n",
1696 validation_errors[ret]);
1697 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1698 sizeof(*acc), rqst->w0.ls_cmd,
1699 FCNVME_RJT_RC_LOGIC,
1700 FCNVME_RJT_EXP_NONE, 0);
1701 return;
1702 }
1703
1704 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1705 atomic_set(&queue->connected, 1);
1706 queue->sqhd = 0;
1707
1708 dev_info(tgtport->dev,
1709 "{%d:%d} Association created\n",
1710 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1711
1712
1713
1714 iod->lsrsp->rsplen = sizeof(*acc);
1715
1716 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1717 fcnvme_lsdesc_len(
1718 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1719 FCNVME_LS_CREATE_ASSOCIATION);
1720 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1721 acc->associd.desc_len =
1722 fcnvme_lsdesc_len(
1723 sizeof(struct fcnvme_lsdesc_assoc_id));
1724 acc->associd.association_id =
1725 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1726 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1727 acc->connectid.desc_len =
1728 fcnvme_lsdesc_len(
1729 sizeof(struct fcnvme_lsdesc_conn_id));
1730 acc->connectid.connection_id = acc->associd.association_id;
1731 }
1732
1733 static void
1734 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1735 struct nvmet_fc_ls_iod *iod)
1736 {
1737 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
1738 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1739 struct nvmet_fc_tgt_queue *queue;
1740 int ret = 0;
1741
1742 memset(acc, 0, sizeof(*acc));
1743
1744 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1745 ret = VERR_CR_CONN_LEN;
1746 else if (rqst->desc_list_len !=
1747 fcnvme_lsdesc_len(
1748 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1749 ret = VERR_CR_CONN_RQST_LEN;
1750 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1751 ret = VERR_ASSOC_ID;
1752 else if (rqst->associd.desc_len !=
1753 fcnvme_lsdesc_len(
1754 sizeof(struct fcnvme_lsdesc_assoc_id)))
1755 ret = VERR_ASSOC_ID_LEN;
1756 else if (rqst->connect_cmd.desc_tag !=
1757 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1758 ret = VERR_CR_CONN_CMD;
1759 else if (rqst->connect_cmd.desc_len !=
1760 fcnvme_lsdesc_len(
1761 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1762 ret = VERR_CR_CONN_CMD_LEN;
1763 else if (!rqst->connect_cmd.ersp_ratio ||
1764 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1765 be16_to_cpu(rqst->connect_cmd.sqsize)))
1766 ret = VERR_ERSP_RATIO;
1767
1768 else {
1769
1770 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1771 be64_to_cpu(rqst->associd.association_id));
1772 if (!iod->assoc)
1773 ret = VERR_NO_ASSOC;
1774 else {
1775 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1776 be16_to_cpu(rqst->connect_cmd.qid),
1777 be16_to_cpu(rqst->connect_cmd.sqsize));
1778 if (!queue)
1779 ret = VERR_QUEUE_ALLOC_FAIL;
1780
1781
1782 nvmet_fc_tgt_a_put(iod->assoc);
1783 }
1784 }
1785
1786 if (ret) {
1787 dev_err(tgtport->dev,
1788 "Create Connection LS failed: %s\n",
1789 validation_errors[ret]);
1790 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1791 sizeof(*acc), rqst->w0.ls_cmd,
1792 (ret == VERR_NO_ASSOC) ?
1793 FCNVME_RJT_RC_INV_ASSOC :
1794 FCNVME_RJT_RC_LOGIC,
1795 FCNVME_RJT_EXP_NONE, 0);
1796 return;
1797 }
1798
1799 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1800 atomic_set(&queue->connected, 1);
1801 queue->sqhd = 0;
1802
1803
1804
1805 iod->lsrsp->rsplen = sizeof(*acc);
1806
1807 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1808 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1809 FCNVME_LS_CREATE_CONNECTION);
1810 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1811 acc->connectid.desc_len =
1812 fcnvme_lsdesc_len(
1813 sizeof(struct fcnvme_lsdesc_conn_id));
1814 acc->connectid.connection_id =
1815 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1816 be16_to_cpu(rqst->connect_cmd.qid)));
1817 }
1818
1819
1820
1821
1822
1823 static int
1824 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1825 struct nvmet_fc_ls_iod *iod)
1826 {
1827 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1828 &iod->rqstbuf->rq_dis_assoc;
1829 struct fcnvme_ls_disconnect_assoc_acc *acc =
1830 &iod->rspbuf->rsp_dis_assoc;
1831 struct nvmet_fc_tgt_assoc *assoc = NULL;
1832 struct nvmet_fc_ls_iod *oldls = NULL;
1833 unsigned long flags;
1834 int ret = 0;
1835
1836 memset(acc, 0, sizeof(*acc));
1837
1838 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1839 if (!ret) {
1840
1841 assoc = nvmet_fc_find_target_assoc(tgtport,
1842 be64_to_cpu(rqst->associd.association_id));
1843 iod->assoc = assoc;
1844 if (!assoc)
1845 ret = VERR_NO_ASSOC;
1846 }
1847
1848 if (ret || !assoc) {
1849 dev_err(tgtport->dev,
1850 "Disconnect LS failed: %s\n",
1851 validation_errors[ret]);
1852 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1853 sizeof(*acc), rqst->w0.ls_cmd,
1854 (ret == VERR_NO_ASSOC) ?
1855 FCNVME_RJT_RC_INV_ASSOC :
1856 FCNVME_RJT_RC_LOGIC,
1857 FCNVME_RJT_EXP_NONE, 0);
1858 return true;
1859 }
1860
1861
1862
1863 iod->lsrsp->rsplen = sizeof(*acc);
1864
1865 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1866 fcnvme_lsdesc_len(
1867 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1868 FCNVME_LS_DISCONNECT_ASSOC);
1869
1870
1871 nvmet_fc_tgt_a_put(assoc);
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
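	/*
	 * The LS response cannot be sent until ABTS's have gone out for
	 * all outstanding I/O and a Disconnect Association LS has been
	 * issued. So save this Disconnect LS and send the response later.
	 * If a prior Disconnect LS was already saved, it is superseded
	 * and rejected below.
	 */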
1882 spin_lock_irqsave(&tgtport->lock, flags);
1883 oldls = assoc->rcv_disconn;
1884 assoc->rcv_disconn = iod;
1885 spin_unlock_irqrestore(&tgtport->lock, flags);
1886
1887 nvmet_fc_delete_target_assoc(assoc);
1888
1889 if (oldls) {
1890 dev_info(tgtport->dev,
1891 "{%d:%d} Multiple Disconnect Association LS's "
1892 "received\n",
1893 tgtport->fc_target_port.port_num, assoc->a_id);
1894
1895 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1896 sizeof(*iod->rspbuf),
1897
1898 rqst->w0.ls_cmd,
1899 FCNVME_RJT_RC_UNAB,
1900 FCNVME_RJT_EXP_NONE, 0);
1901 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1902 }
1903
1904 return false;
1905 }
1906
1907
1908
1909
1910
1911 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1912
1913 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1914
1915 static void
1916 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1917 {
1918 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1919 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1920
1921 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1922 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1923 nvmet_fc_free_ls_iod(tgtport, iod);
1924 nvmet_fc_tgtport_put(tgtport);
1925 }
1926
1927 static void
1928 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1929 struct nvmet_fc_ls_iod *iod)
1930 {
1931 int ret;
1932
1933 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1934 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1935
1936 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1937 if (ret)
1938 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1939 }
1940
1941
1942
1943
1944 static void
1945 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1946 struct nvmet_fc_ls_iod *iod)
1947 {
1948 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
1949 bool sendrsp = true;
1950
1951 iod->lsrsp->nvme_fc_private = iod;
1952 iod->lsrsp->rspbuf = iod->rspbuf;
1953 iod->lsrsp->rspdma = iod->rspdma;
1954 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1955
1956 iod->lsrsp->rsplen = 0;
1957
1958 iod->assoc = NULL;
1959
1960
1961
1962
1963
1964
1965 switch (w0->ls_cmd) {
1966 case FCNVME_LS_CREATE_ASSOCIATION:
1967
1968 nvmet_fc_ls_create_association(tgtport, iod);
1969 break;
1970 case FCNVME_LS_CREATE_CONNECTION:
1971
1972 nvmet_fc_ls_create_connection(tgtport, iod);
1973 break;
1974 case FCNVME_LS_DISCONNECT_ASSOC:
1975
1976 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1977 break;
1978 default:
1979 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1980 sizeof(*iod->rspbuf), w0->ls_cmd,
1981 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1982 }
1983
1984 if (sendrsp)
1985 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1986 }
1987
1988
1989
1990
1991 static void
1992 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1993 {
1994 struct nvmet_fc_ls_iod *iod =
1995 container_of(work, struct nvmet_fc_ls_iod, work);
1996 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1997
1998 nvmet_fc_handle_ls_rqst(tgtport, iod);
1999 }
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
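/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD upon
 *                       reception of a NVME LS request.
 *
 * The nvmet-fc layer copies the payload into an internal structure for
 * processing, so upon return from this routine the LLDD may immediately
 * free or reuse the LS request buffer passed in the call.
 *
 * If this routine returns an error, the LLDD should abort the exchange.
 *
 * @target_port:  pointer to the (registered) target port the LS was
 *                received on.
 * @hosthandle:   LLDD-supplied handle identifying the remote host port;
 *                passed back to the LLDD on LS transmissions and on
 *                nvmet_fc_invalidate_host().
 * @lsrsp:        pointer to a lsrsp structure used to reference the
 *                exchange corresponding to the LS.
 * @lsreqbuf:     pointer to the buffer containing the LS request.
 * @lsreqbuf_len: length, in bytes, of the received LS request.
 */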
2020 int
2021 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2022 void *hosthandle,
2023 struct nvmefc_ls_rsp *lsrsp,
2024 void *lsreqbuf, u32 lsreqbuf_len)
2025 {
2026 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2027 struct nvmet_fc_ls_iod *iod;
2028 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2029
2030 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2031 dev_info(tgtport->dev,
2032 "RCV %s LS failed: payload too large (%d)\n",
2033 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2034 nvmefc_ls_names[w0->ls_cmd] : "",
2035 lsreqbuf_len);
2036 return -E2BIG;
2037 }
2038
2039 if (!nvmet_fc_tgtport_get(tgtport)) {
2040 dev_info(tgtport->dev,
2041 "RCV %s LS failed: target deleting\n",
2042 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2043 nvmefc_ls_names[w0->ls_cmd] : "");
2044 return -ESHUTDOWN;
2045 }
2046
2047 iod = nvmet_fc_alloc_ls_iod(tgtport);
2048 if (!iod) {
2049 dev_info(tgtport->dev,
2050 "RCV %s LS failed: context allocation failed\n",
2051 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2052 nvmefc_ls_names[w0->ls_cmd] : "");
2053 nvmet_fc_tgtport_put(tgtport);
2054 return -ENOENT;
2055 }
2056
2057 iod->lsrsp = lsrsp;
2058 iod->fcpreq = NULL;
2059 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2060 iod->rqstdatalen = lsreqbuf_len;
2061 iod->hosthandle = hosthandle;
2062
2063 queue_work(nvmet_wq, &iod->work);
2064
2065 return 0;
2066 }
2067 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
2068
2069
2070
2071
2072
2073
2074
2075
2076 static int
2077 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2078 {
2079 struct scatterlist *sg;
2080 unsigned int nent;
2081
2082 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2083 if (!sg)
2084 goto out;
2085
2086 fod->data_sg = sg;
2087 fod->data_sg_cnt = nent;
2088 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2089 ((fod->io_dir == NVMET_FCP_WRITE) ?
2090 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2091
2092 fod->next_sg = fod->data_sg;
2093
2094 return 0;
2095
2096 out:
2097 return NVME_SC_INTERNAL;
2098 }
2099
2100 static void
2101 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2102 {
2103 if (!fod->data_sg || !fod->data_sg_cnt)
2104 return;
2105
2106 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2107 ((fod->io_dir == NVMET_FCP_WRITE) ?
2108 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2109 sgl_free(fod->data_sg);
2110 fod->data_sg = NULL;
2111 fod->data_sg_cnt = 0;
2112 }
2113
2114
2115 static bool
2116 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2117 {
2118 u32 sqtail, used;
2119
2120
2121 sqtail = atomic_read(&q->sqtail) % q->sqsize;
2122
2123 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2124 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2125 }
2126
2127
2128
2129
2130
2131 static void
2132 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2133 struct nvmet_fc_fcp_iod *fod)
2134 {
2135 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2136 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2137 struct nvme_completion *cqe = &ersp->cqe;
2138 u32 *cqewd = (u32 *)cqe;
2139 bool send_ersp = false;
2140 u32 rsn, rspcnt, xfr_length;
2141
2142 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2143 xfr_length = fod->req.transfer_len;
2144 else
2145 xfr_length = fod->offset;
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
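	/*
	 * A zero-filled (abbreviated) response may be sent instead of a
	 * full ERSP. A full ERSP must be sent:
	 *   every N responses, where N = ersp_ratio
	 *   for fabrics commands
	 *   whenever the transferred length differs from the command length
	 *   whenever status is non-zero or CQE dwords 0/1 are non-zero
	 *   for fused commands
	 *   when the SQ is 90% or more full
	 */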
2166 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2167 if (!(rspcnt % fod->queue->ersp_ratio) ||
2168 nvme_is_fabrics((struct nvme_command *) sqe) ||
2169 xfr_length != fod->req.transfer_len ||
2170 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2171 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2172 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2173 send_ersp = true;
2174
2175
2176 fod->fcpreq->rspaddr = ersp;
2177 fod->fcpreq->rspdma = fod->rspdma;
2178
2179 if (!send_ersp) {
2180 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2181 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2182 } else {
2183 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2184 rsn = atomic_inc_return(&fod->queue->rsn);
2185 ersp->rsn = cpu_to_be32(rsn);
2186 ersp->xfrd_len = cpu_to_be32(xfr_length);
2187 fod->fcpreq->rsplen = sizeof(*ersp);
2188 }
2189
2190 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2191 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2192 }
2193
2194 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2195
2196 static void
2197 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2198 struct nvmet_fc_fcp_iod *fod)
2199 {
2200 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2201
2202
2203 nvmet_fc_free_tgt_pgs(fod);
2204
2205
2206
2207
2208
2209
2210 if (!fod->aborted)
2211 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2212
2213 nvmet_fc_free_fcp_iod(fod->queue, fod);
2214 }
2215
2216 static void
2217 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2218 struct nvmet_fc_fcp_iod *fod)
2219 {
2220 int ret;
2221
2222 fod->fcpreq->op = NVMET_FCOP_RSP;
2223 fod->fcpreq->timeout = 0;
2224
2225 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2226
2227 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2228 if (ret)
2229 nvmet_fc_abort_op(tgtport, fod);
2230 }
2231
2232 static void
2233 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2234 struct nvmet_fc_fcp_iod *fod, u8 op)
2235 {
2236 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2237 struct scatterlist *sg = fod->next_sg;
2238 unsigned long flags;
2239 u32 remaininglen = fod->req.transfer_len - fod->offset;
2240 u32 tlen = 0;
2241 int ret;
2242
2243 fcpreq->op = op;
2244 fcpreq->offset = fod->offset;
2245 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
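	/*
	 * for the next sequence:
	 *   break at a sg element boundary
	 *   attempt to keep the sequence length capped at
	 *   NVMET_FC_MAX_SEQ_LENGTH, but allow it to be longer if a
	 *   single sg element is larger than that amount.
	 */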
2256 fcpreq->sg = sg;
2257 fcpreq->sg_cnt = 0;
2258 while (tlen < remaininglen &&
2259 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2260 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2261 fcpreq->sg_cnt++;
2262 tlen += sg_dma_len(sg);
2263 sg = sg_next(sg);
2264 }
2265 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2266 fcpreq->sg_cnt++;
2267 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2268 sg = sg_next(sg);
2269 }
2270 if (tlen < remaininglen)
2271 fod->next_sg = sg;
2272 else
2273 fod->next_sg = NULL;
2274
2275 fcpreq->transfer_length = tlen;
2276 fcpreq->transferred_length = 0;
2277 fcpreq->fcp_error = 0;
2278 fcpreq->rsplen = 0;
2279
2280 /*
2281  * If this READDATA moves the final bytes of the transfer and the LLDD
2282  * supports it, combine the data with the response in one operation.
2283  */
2284 if ((op == NVMET_FCOP_READDATA) &&
2285 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2286 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2287 fcpreq->op = NVMET_FCOP_READDATA_RSP;
2288 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2289 }
2290
2291 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2292 if (ret) {
2293 /*
2294  * The op was rejected by the LLDD, so no completion callback will
2295  * arrive for it. Mark the I/O as failing; setting fod->abort without
2296  * the lock is safe here as we are still in the submission path.
2297  */
2298 fod->abort = true;
2299
2300 if (op == NVMET_FCOP_WRITEDATA) {
2301 spin_lock_irqsave(&fod->flock, flags);
2302 fod->writedataactive = false;
2303 spin_unlock_irqrestore(&fod->flock, flags);
2304 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2305 } else {
2306 fcpreq->fcp_error = ret;
2307 fcpreq->transferred_length = 0;
2308 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2309 }
2310 }
2311 }
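/*
 * Worked example of the sequence build above, assuming (illustratively)
 * 64 KB DMA-mapped sg elements, a transfer larger than one sequence and
 * max_sg_cnt not being the limiter: the loop gathers three elements
 * (192 KB) and stops, because adding a fourth would make tlen reach
 * NVMET_FC_MAX_SEQ_LENGTH and the comparison is strict. The remainder
 * is sent by later calls, restarting from fod->next_sg / fod->offset.
 * A first element that is by itself over the cap is still sent whole
 * via the sg_cnt == 0 fallback rather than being split.
 */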
2312
2313 static inline bool
2314 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2315 {
2316 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2317 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2318
2319 /* the io has been marked for abort - tear down the in-progress op */
2320 if (abort) {
2321 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2322 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2323 return true;
2324 }
2325
2326 nvmet_fc_abort_op(tgtport, fod);
2327 return true;
2328 }
2329
2330 return false;
2331 }
2332
2333 /*
2334  * Completion handler for FCP data/response operations issued to the LLDD.
2335  */
2336 static void
2337 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2338 {
2339 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2340 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2341 unsigned long flags;
2342 bool abort;
2343
2344 spin_lock_irqsave(&fod->flock, flags);
2345 abort = fod->abort;
2346 fod->writedataactive = false;
2347 spin_unlock_irqrestore(&fod->flock, flags);
2348
2349 switch (fcpreq->op) {
2350
2351 case NVMET_FCOP_WRITEDATA:
2352 if (__nvmet_fc_fod_op_abort(fod, abort))
2353 return;
2354 if (fcpreq->fcp_error ||
2355 fcpreq->transferred_length != fcpreq->transfer_length) {
2356 spin_lock_irqsave(&fod->flock, flags);
2357 fod->abort = true;
2358 spin_unlock_irqrestore(&fod->flock, flags);
2359
2360 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2361 return;
2362 }
2363
2364 fod->offset += fcpreq->transferred_length;
2365 if (fod->offset != fod->req.transfer_len) {
2366 spin_lock_irqsave(&fod->flock, flags);
2367 fod->writedataactive = true;
2368 spin_unlock_irqrestore(&fod->flock, flags);
2369
2370 /* transfer the next chunk of the write data */
2371 nvmet_fc_transfer_fcp_data(tgtport, fod,
2372 NVMET_FCOP_WRITEDATA);
2373 return;
2374 }
2375
2376 /* data transfer complete - hand the command to the nvmet layer */
2377 fod->req.execute(&fod->req);
2378 break;
2379
2380 case NVMET_FCOP_READDATA:
2381 case NVMET_FCOP_READDATA_RSP:
2382 if (__nvmet_fc_fod_op_abort(fod, abort))
2383 return;
2384 if (fcpreq->fcp_error ||
2385 fcpreq->transferred_length != fcpreq->transfer_length) {
2386 nvmet_fc_abort_op(tgtport, fod);
2387 return;
2388 }
2389
2390 /* successful transfer */
2391
2392 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2393 /* response already went out with the data - release pages and the io */
2394 nvmet_fc_free_tgt_pgs(fod);
2395 nvmet_fc_free_fcp_iod(fod->queue, fod);
2396 return;
2397 }
2398
2399 fod->offset += fcpreq->transferred_length;
2400 if (fod->offset != fod->req.transfer_len) {
2401 /* transfer the next chunk of the read data */
2402 nvmet_fc_transfer_fcp_data(tgtport, fod,
2403 NVMET_FCOP_READDATA);
2404 return;
2405 }
2406
2407 /* data transfer complete - send the response */
2408
2409 /* data pages no longer needed */
2410 nvmet_fc_free_tgt_pgs(fod);
2411
2412 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2413
2414 break;
2415
2416 case NVMET_FCOP_RSP:
2417 if (__nvmet_fc_fod_op_abort(fod, abort))
2418 return;
2419 nvmet_fc_free_fcp_iod(fod->queue, fod);
2420 break;
2421
2422 default:
2423 break;
2424 }
2425 }
2426
2427 static void
2428 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2429 {
2430 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2431
2432 nvmet_fc_fod_op_done(fod);
2433 }
2434
2435 /*
2436  * Completion handler invoked after the nvmet layer has executed the command.
2437  */
2438 static void
2439 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2440 struct nvmet_fc_fcp_iod *fod, int status)
2441 {
2442 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2443 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2444 unsigned long flags;
2445 bool abort;
2446
2447 spin_lock_irqsave(&fod->flock, flags);
2448 abort = fod->abort;
2449 spin_unlock_irqrestore(&fod->flock, flags);
2450
2451 /* track the last sq_head value reported by the nvmet layer */
2452 if (!status)
2453 fod->queue->sqhd = cqe->sq_head;
2454
2455 if (abort) {
2456 nvmet_fc_abort_op(tgtport, fod);
2457 return;
2458 }
2459
2460 /* an error occurred after initial parsing of the command */
2461 if (status) {
2462 /* build a minimal CQE carrying that status by hand */
2463 memset(cqe, 0, sizeof(*cqe));
2464 cqe->sq_head = fod->queue->sqhd;
2465 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2466 cqe->command_id = sqe->command_id;
2467 cqe->status = cpu_to_le16(status);
2468 } else {
2469 /*
2470  * The nvmet layer completed the command normally. For a read that
2471  * produced data, start the data transfer now; the response will be
2472  * sent once the transfer finishes (combined with the final data
2473  * sequence when the LLDD supports it).
2474  */
2475 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2476
2477 nvmet_fc_transfer_fcp_data(tgtport, fod,
2478 NVMET_FCOP_READDATA);
2479 return;
2480 }
2481
2482 /* writes and no-data commands fall through to send the response */
2483 }
2484
2485 /* data pages are no longer needed */
2486 nvmet_fc_free_tgt_pgs(fod);
2487
2488 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2489 }
2490
2491
2492 static void
2493 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2494 {
2495 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2496 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2497
2498 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2499 }
2500
2501
2502 /*
2503  * Actual processing routine for FC-NVME I/O requests received from the LLDD.
2504  */
2505 static void
2506 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2507 struct nvmet_fc_fcp_iod *fod)
2508 {
2509 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2510 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2511 int ret;
2512
2513 /*
2514  * Set the LLDD completion callback for all FCP operations issued for
2515  * this I/O, then validate the FC command IU against the embedded SQE:
2516  * a WRITE-flagged IU must carry a write opcode, a READ-flagged IU must
2517  * not, and an IU with neither flag must not claim a data length. Any
2518  * mismatch is treated as a transport error and the exchange is
2519  * aborted.
2520  */
2522 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2523
2524 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2525 fod->io_dir = NVMET_FCP_WRITE;
2526 if (!nvme_is_write(&cmdiu->sqe))
2527 goto transport_error;
2528 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2529 fod->io_dir = NVMET_FCP_READ;
2530 if (nvme_is_write(&cmdiu->sqe))
2531 goto transport_error;
2532 } else {
2533 fod->io_dir = NVMET_FCP_NODATA;
2534 if (xfrlen)
2535 goto transport_error;
2536 }
2537
2538 fod->req.cmd = &fod->cmdiubuf.sqe;
2539 fod->req.cqe = &fod->rspiubuf.cqe;
2540 if (tgtport->pe)
2541 fod->req.port = tgtport->pe->port;
2542
2543 /* ensure the response IU starts out zeroed */
2544 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2545
2546 fod->data_sg = NULL;
2547 fod->data_sg_cnt = 0;
2548
2549 ret = nvmet_req_init(&fod->req,
2550 &fod->queue->nvme_cq,
2551 &fod->queue->nvme_sq,
2552 &nvmet_fc_tgt_fcp_ops);
2553 if (!ret) {
2554 /* bad SQE content or invalid controller state */
2555 /* the nvmet layer has already sent the error response for us */
2556 return;
2557 }
2558
2559 fod->req.transfer_len = xfrlen;
2560
2561 /* keep a running count of the sq tail position */
2562 atomic_inc(&fod->queue->sqtail);
2563
2564 if (fod->req.transfer_len) {
2565 ret = nvmet_fc_alloc_tgt_pgs(fod);
2566 if (ret) {
2567 nvmet_req_complete(&fod->req, ret);
2568 return;
2569 }
2570 }
2571 fod->req.sg = fod->data_sg;
2572 fod->req.sg_cnt = fod->data_sg_cnt;
2573 fod->offset = 0;
2574
2575 if (fod->io_dir == NVMET_FCP_WRITE) {
2576 /* pull the write data from the host before invoking the nvmet layer */
2577 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2578 return;
2579 }
2580
2581 /*
2582  * Reads and commands with no data can invoke the nvmet layer
2583  * directly. For reads, the data (and then the response) is pushed
2584  * out of the command completion handler once execution finishes.
2585  */
2587 fod->req.execute(&fod->req);
2588 return;
2589
2590 transport_error:
2591 nvmet_fc_abort_op(tgtport, fod);
2592 }
2593
2594 /**
2595  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD upon
2596  *                        reception of an FCP CMD IU.
2597  *
2598  * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2599  * layer for processing.
2600  *
2601  * The IU is validated, the destination queue is looked up from the
2602  * connection id, and a per-queue job structure (struct
2603  * nvmet_fc_fcp_iod) is allocated for the command. The CMD IU is copied
2604  * into the job structure, so on a 0 return the LLDD may immediately
2605  * reuse or free the cmdiubuf it passed in.
2606  *
2607  * If all job structures of the queue are currently in use and the LLDD
2608  * implements the defer_rcv() callback, the request is parked on the
2609  * queue's pending list instead and -EOVERFLOW is returned. In that
2610  * case the transport keeps referencing cmdiubuf (it is remembered via
2611  * fcpreq->rspaddr/rsplen), so the LLDD must keep the buffer valid; the
2612  * deferred command is later serviced through the LLDD's defer_rcv()
2613  * callback, at which point the buffer may be released.
2614  *
2615  * @target_port: pointer to the (registered) target port the FCP CMD IU
2616  *               was received on.
2617  * @fcpreq:      pointer to an fcpreq request structure used to reference
2618  *               the exchange corresponding to the FCP command.
2619  * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU.
2620  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU.
2621  *
2622  * Returns 0 on success, -EIO if the IU is malformed, -ENOTCONN if no
2623  * queue matches the connection id, -ENOENT or -ENOMEM if the command
2624  * could not be accepted, or -EOVERFLOW if it was deferred as above.
2625  */
2641 int
2642 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2643 struct nvmefc_tgt_fcp_req *fcpreq,
2644 void *cmdiubuf, u32 cmdiubuf_len)
2645 {
2646 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2647 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2648 struct nvmet_fc_tgt_queue *queue;
2649 struct nvmet_fc_fcp_iod *fod;
2650 struct nvmet_fc_defer_fcp_req *deferfcp;
2651 unsigned long flags;
2652
2653 /* validate the IU so the connection id can be trusted to find the queue */
2654 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2655 (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2656 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2657 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2658 return -EIO;
2659
2660 queue = nvmet_fc_find_target_queue(tgtport,
2661 be64_to_cpu(cmdiu->connection_id));
2662 if (!queue)
2663 return -ENOTCONN;
2664
2665 /*
2666  * nvmet_fc_find_target_queue() took a reference on the queue. On the
2667  * success path that reference is inherited by the allocated fod and
2668  * dropped when the fod is freed, so it is only released explicitly on
2669  * the error paths below.
2670  */
2672 spin_lock_irqsave(&queue->qlock, flags);
2673
2674 fod = nvmet_fc_alloc_fcp_iod(queue);
2675 if (fod) {
2676 spin_unlock_irqrestore(&queue->qlock, flags);
2677
2678 fcpreq->nvmet_fc_private = fod;
2679 fod->fcpreq = fcpreq;
2680
2681 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2682
2683 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2684
2685 return 0;
2686 }
2687
2688 if (!tgtport->ops->defer_rcv) {
2689 spin_unlock_irqrestore(&queue->qlock, flags);
2690 /* release the queue lookup reference */
2691 nvmet_fc_tgt_q_put(queue);
2692 return -ENOENT;
2693 }
2694
2695 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2696 struct nvmet_fc_defer_fcp_req, req_list);
2697 if (deferfcp) {
2698 /* reuse a previously allocated deferral entry */
2699 list_del(&deferfcp->req_list);
2700 } else {
2701 spin_unlock_irqrestore(&queue->qlock, flags);
2702
2703 /* none available - allocate one, dropping the lock to do so */
2704 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2705 if (!deferfcp) {
2706 /* release the queue lookup reference */
2707 nvmet_fc_tgt_q_put(queue);
2708 return -ENOMEM;
2709 }
2710 spin_lock_irqsave(&queue->qlock, flags);
2711 }
2712
2713 /* rspaddr/rsplen are unused at this point - borrow them to remember the CMD IU */
2714 fcpreq->rspaddr = cmdiubuf;
2715 fcpreq->rsplen = cmdiubuf_len;
2716 deferfcp->fcp_req = fcpreq;
2717
2718 /* defer processing until a fod becomes available */
2719 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2720
2721 /* note: the queue lookup reference is intentionally kept for the deferred command */
2722
2723 spin_unlock_irqrestore(&queue->qlock, flags);
2724
2725 return -EOVERFLOW;
2726 }
2727 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
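/*
 * Minimal sketch of an LLDD feeding a received CMD IU into the routine
 * above and reacting to its return codes. The example_* names are
 * hypothetical and only illustrate the calling convention; they are not
 * part of this API.
 *
 *	static void example_lldd_recv_fcp_cmd(struct example_lport *lp,
 *					      struct example_exchange *xchg,
 *					      void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(lp->targetport, &xchg->tgt_fcp_req,
 *					   cmdiu, cmdiu_len);
 *		if (ret == 0) {
 *			return;			// IU copied; cmdiu may be reused now
 *		} else if (ret == -EOVERFLOW) {
 *			xchg->cmdiu_held = true;	// keep cmdiu valid until
 *			return;				// defer_rcv() is invoked
 *		}
 *		example_lldd_terminate_exchange(xchg);	// reject the command
 *	}
 */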
2728
2729 /**
2730  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD upon
2731  *                          reception of an ABTS for an FCP command.
2732  *
2733  * Notify the transport that an ABTS has been received for an FCP command
2734  * that was previously handed to it via nvmet_fc_rcv_fcp_req() and is
2735  * still outstanding.
2736  *
2737  * If the exchange no longer maps to an active job structure there is
2738  * nothing to do. Otherwise the job is only marked as aborted here; the
2739  * teardown (and the release of the exchange back to the LLDD) happens
2740  * when the outstanding work for the I/O completes and the normal
2741  * completion paths observe the abort flags.
2742  *
2743  * @target_port: pointer to the (registered) target port the FCP CMD IU
2744  *               was received on.
2745  * @fcpreq:      pointer to the fcpreq request structure corresponding to
2746  *               the exchange that received the ABTS.
2747  */
2752 void
2753 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2754 struct nvmefc_tgt_fcp_req *fcpreq)
2755 {
2756 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2757 struct nvmet_fc_tgt_queue *queue;
2758 unsigned long flags;
2759
2760 if (!fod || fod->fcpreq != fcpreq)
2761 /* job appears to have already completed - ignore the abort */
2762 return;
2763
2764 queue = fod->queue;
2765
2766 spin_lock_irqsave(&queue->qlock, flags);
2767 if (fod->active) {
2768 /*
2769  * Only mark the I/O as aborted here. Whatever work is currently
2770  * outstanding will, on completion, see the abort flags and perform
2771  * the teardown and callback.
2772  */
2773 spin_lock(&fod->flock);
2774 fod->abort = true;
2775 fod->aborted = true;
2776 spin_unlock(&fod->flock);
2777 }
2778 spin_unlock_irqrestore(&queue->qlock, flags);
2779 }
2780 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
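/*
 * Sketch of the corresponding ABTS hook in a hypothetical LLDD: the
 * driver simply forwards the abort for the affected exchange and keeps
 * the exchange allocated until the transport releases it through the
 * normal fcp request completion/release path.
 *
 *	static void example_lldd_recv_abts(struct example_lport *lp,
 *					   struct example_exchange *xchg)
 *	{
 *		nvmet_fc_rcv_fcp_abort(lp->targetport, &xchg->tgt_fcp_req);
 *	}
 */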
2781
2782
2783 struct nvmet_fc_traddr {
2784 u64 nn;
2785 u64 pn;
2786 };
2787
2788 static int
2789 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2790 {
2791 u64 token64;
2792
2793 if (match_u64(sstr, &token64))
2794 return -EINVAL;
2795 *val = token64;
2796
2797 return 0;
2798 }
2799
2800 /*
2801  * Validate a traddr string and extract its node and port WWNs. The
2802  * kernel number parser needs a "0x" prefix to pick the base, so each
2803  * name is copied into a local "0x"-prefixed buffer before parsing.
2804  */
2805 static int
2806 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2807 {
2808 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2809 substring_t wwn = { name, &name[sizeof(name)-1] };
2810 int nnoffset, pnoffset;
2811
2812 /* accept either the long (0x-prefixed) or the short traddr format */
2813 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2814 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2815 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2816 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2817 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2818 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2819 NVME_FC_TRADDR_OXNNLEN;
2820 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2821 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2822 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2823 "pn-", NVME_FC_TRADDR_NNLEN))) {
2824 nnoffset = NVME_FC_TRADDR_NNLEN;
2825 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2826 } else
2827 goto out_einval;
2828
2829 name[0] = '0';
2830 name[1] = 'x';
2831 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2832
2833 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2834 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2835 goto out_einval;
2836
2837 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2838 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2839 goto out_einval;
2840
2841 return 0;
2842
2843 out_einval:
2844 pr_warn("%s: bad traddr string\n", __func__);
2845 return -EINVAL;
2846 }
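/*
 * Example traddr strings accepted above (values are illustrative): the
 * long form "nn-0x20000090fa942779:pn-0x10000090fa942779" and the short
 * form "nn-20000090fa942779:pn-10000090fa942779", each carrying the
 * 16-hex-digit node and port WWNs whose offsets are given by the
 * NVME_FC_TRADDR_* constants.
 */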
2847
2848 static int
2849 nvmet_fc_add_port(struct nvmet_port *port)
2850 {
2851 struct nvmet_fc_tgtport *tgtport;
2852 struct nvmet_fc_port_entry *pe;
2853 struct nvmet_fc_traddr traddr = { 0L, 0L };
2854 unsigned long flags;
2855 int ret;
2856
2857 /* validate the address family and transport type */
2858 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2859 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2860 return -EINVAL;
2861
2862
2863 /* map the traddr WWNs to a registered target port */
2864 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2865 sizeof(port->disc_addr.traddr));
2866 if (ret)
2867 return ret;
2868
2869 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2870 if (!pe)
2871 return -ENOMEM;
2872
2873 ret = -ENXIO;
2874 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2875 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2876 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2877 (tgtport->fc_target_port.port_name == traddr.pn)) {
2878 /* an FC target port can be bound to only one nvmet port id */
2879 if (!tgtport->pe) {
2880 nvmet_fc_portentry_bind(tgtport, pe, port);
2881 ret = 0;
2882 } else
2883 ret = -EALREADY;
2884 break;
2885 }
2886 }
2887 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2888
2889 if (ret)
2890 kfree(pe);
2891
2892 return ret;
2893 }
2894
2895 static void
2896 nvmet_fc_remove_port(struct nvmet_port *port)
2897 {
2898 struct nvmet_fc_port_entry *pe = port->priv;
2899
2900 nvmet_fc_portentry_unbind(pe);
2901
2902 kfree(pe);
2903 }
2904
2905 static void
2906 nvmet_fc_discovery_chg(struct nvmet_port *port)
2907 {
2908 struct nvmet_fc_port_entry *pe = port->priv;
2909 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2910
2911 if (tgtport && tgtport->ops->discovery_event)
2912 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2913 }
2914
2915 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2916 .owner = THIS_MODULE,
2917 .type = NVMF_TRTYPE_FC,
2918 .msdbd = 1,
2919 .add_port = nvmet_fc_add_port,
2920 .remove_port = nvmet_fc_remove_port,
2921 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2922 .delete_ctrl = nvmet_fc_delete_ctrl,
2923 .discovery_chg = nvmet_fc_discovery_chg,
2924 };
2925
2926 static int __init nvmet_fc_init_module(void)
2927 {
2928 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2929 }
2930
2931 static void __exit nvmet_fc_exit_module(void)
2932 {
2933 /* sanity check - all target ports should have been removed by now */
2934 if (!list_empty(&nvmet_fc_target_list))
2935 pr_warn("%s: targetport list not empty\n", __func__);
2936
2937 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2938
2939 ida_destroy(&nvmet_fc_tgtport_cnt);
2940 }
2941
2942 module_init(nvmet_fc_init_module);
2943 module_exit(nvmet_fc_exit_module);
2944
2945 MODULE_LICENSE("GPL v2");