// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>

/* *************************** Data Structures *************************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* seconds */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

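/*
 * Flag meanings below are inferred from their use later in this file:
 * FCOP_FLAGS_TERMIO marks an op being force-terminated as part of
 * association teardown (and counted in ctrl->iocnt); FCOP_FLAGS_AEN
 * marks one of the preallocated Async Event ops, which have no
 * struct request behind them.
 */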
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

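/*
 * blk-mq allocates this wrapper as the request pdu: the base FCP op,
 * followed by the inline scatterlist, followed by the LLDD's private
 * area (fcprqst_priv_sz bytes, wired up in nvme_fc_init_request()).
 */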
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look for a matching existing lport state. A localport that was
	 * deleted but not yet freed may be resumed rather than allocating
	 * a new one.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found an existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
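
/*
 * A minimal usage sketch for an LLDD (hypothetical names; the template
 * ops, sizes, and wwnn/wwpn values come from the LLDD itself):
 *
 *	struct nvme_fc_local_port *localport;
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					 &pdev->dev, &localport);
 */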

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);


/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * 64 bytes leaves room for the "NVMEFC_HOST_TRADDR="/"NVMEFC_TRADDR="
 * environment-variable prefix and the terminating NUL used in the
 * uevent below.
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];
	char tgtaddr[FCNVME_TRADDR_LENGTH];
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport reference list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_free(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

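/*
 * Called during remoteport registration to look for a previously
 * deleted rport on this lport that is still waiting out dev_loss_tmo.
 * If the WWNN/WWPN match, the rport is resurrected: its state returns
 * to ONLINE and any controllers parked awaiting connectivity are
 * kicked to reconnect via nvme_fc_resume_controller().
 */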
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look for an rport that was previously deleted on this lport,
	 * but is still waiting out dev_loss_tmo. If found, resume it
	 * rather than allocating a new one.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

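/*
 * Walk the rport's pending LS request list and ask the LLDD to abort
 * each op not already marked TERMIO. The lock is dropped around the
 * ls_abort() upcall, so the scan restarts from the list head after
 * every abort.
 */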
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect\n", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do
		 * anything further. Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the reset completes.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address, we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

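/*
 * Completion callback used for synchronous LS requests: stash the LLDD
 * status and wake the waiter in nvme_fc_send_ls_req().
 */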
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so a response may never
 * arrive even if one was requested. As such, the LS is sent
 * asynchronously; if the send fails or connectivity is lost, the op
 * is simply freed and the teardown proceeds anyway.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}

static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop);

	nvme_fc_rport_put(rport);
}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}

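/*
 * Find the controller on this rport whose association id matches a
 * received Disconnect Association LS. On a match, a controller
 * reference is taken (released by the caller) and the LS op is parked
 * on ctrl->rcv_disconn; any LS already parked there is rejected as a
 * duplicate.
 */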
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}

/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}

/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}

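/*
 * Work routine behind rport->lsrcv_work: process every LS on the
 * ls_rcv_list that is not yet handled. The rport lock is dropped
 * around the handler, so the scan restarts from the list head after
 * each one; entries are unlinked later by nvme_fc_xmt_ls_rsp_done().
 */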
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}

/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a FC-NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received on.
 * @lsrsp:      pointer to a lsrsp structure to be used to reference
 *              the exchange corresponding to the LS when issuing an
 *              ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop) +
			sizeof(union nvmefc_ls_requests) +
			sizeof(union nvmefc_ls_responses),
			GFP_KERNEL);
	if (!lsop) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_put;
	}
	lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
	lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);

/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

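/*
 * Move the op to ABORTED and, if it was ACTIVE, ask the LLDD to abort
 * the outstanding exchange. While FCCTRL_TERMIO is set, aborted ops
 * are counted in ctrl->iocnt so teardown can wait for all of them on
 * ioabort_wait.
 */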
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}

/*
 * nvme_fc_io_getuuid - Routine called to get the appid field
 * associated with request by the lldd
 * @req: IO request from nvme fc to driver
 * Return: UUID if there is an appid associated with VM or
 * NULL if the user/libvirt has not set the appid to VM
 */
char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;

	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
		return NULL;
	return blkcg_get_fc_appid(rq->bio);
}
EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * potentially causes the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
	return res;
}

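/*
 * AEN ops live outside blk-mq, so they get command ids just above the
 * admin tag space (NVME_AQ_BLK_MQ_DEPTH + i) and, unlike normal
 * requests, a NULL struct request.
 */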
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Note: per-command SQE/CQE buffers are allocated and dma-mapped
	 * per request in __nvme_fc_init_request() rather than preallocated
	 * and mapped per queue, so there is nothing further to set up here.
	 */
}
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278 static void
2279 nvme_fc_free_queue(struct nvme_fc_queue *queue)
2280 {
2281 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2282 return;
2283
2284 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2285
	/*
	 * The current implementation never disconnects a single queue;
	 * it always terminates the whole association, so no per-queue
	 * Disconnect LS is sent here. Just reset the transport state.
	 */
2291 queue->connection_id = 0;
2292 atomic_set(&queue->csn, 0);
2293 }
2294
2295 static void
2296 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2297 struct nvme_fc_queue *queue, unsigned int qidx)
2298 {
2299 if (ctrl->lport->ops->delete_queue)
2300 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2301 queue->lldd_handle);
2302 queue->lldd_handle = NULL;
2303 }
2304
2305 static void
2306 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2307 {
2308 int i;
2309
2310 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2311 nvme_fc_free_queue(&ctrl->queues[i]);
2312 }
2313
2314 static int
2315 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2316 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2317 {
2318 int ret = 0;
2319
2320 queue->lldd_handle = NULL;
2321 if (ctrl->lport->ops->create_queue)
2322 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2323 qidx, qsize, &queue->lldd_handle);
2324
2325 return ret;
2326 }
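
/*
 * Editor's sketch (not part of this driver): an LLDD that wants a
 * per-queue hardware context would implement the optional create_queue/
 * delete_queue template entries roughly as below. The my_lldd_* names
 * are hypothetical; localport->private is assumed to be the LLDD's
 * private area sized by local_priv_sz at registration.
 *
 *	static int my_lldd_create_queue(struct nvme_fc_local_port *localport,
 *			unsigned int qidx, u16 qsize, void **handle)
 *	{
 *		struct my_lldd_queue *q;
 *
 *		q = my_lldd_alloc_hw_queue(localport->private, qidx, qsize);
 *		if (!q)
 *			return -ENOMEM;
 *		*handle = q;	// stored by the transport in queue->lldd_handle
 *		return 0;
 *	}
 *
 *	static void my_lldd_delete_queue(struct nvme_fc_local_port *localport,
 *			unsigned int qidx, void *handle)
 *	{
 *		my_lldd_free_hw_queue(handle);
 *	}
 */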
2327
2328 static void
2329 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2330 {
2331 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2332 int i;
2333
2334 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2335 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2336 }
2337
2338 static int
2339 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2340 {
2341 struct nvme_fc_queue *queue = &ctrl->queues[1];
2342 int i, ret;
2343
2344 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2345 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2346 if (ret)
2347 goto delete_queues;
2348 }
2349
2350 return 0;
2351
2352 delete_queues:
2353 for (; i > 0; i--)
2354 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2355 return ret;
2356 }
2357
2358 static int
2359 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2360 {
2361 int i, ret = 0;
2362
2363 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
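		/*
		 * Editor's note: the 4th argument to nvme_fc_connect_queue()
		 * is assumed to be the ERSP ratio hint carried in the Create
		 * Connection LS - asking the target for a full extended
		 * response on roughly one in five commands (qsize / 5).
		 */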
2364 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2365 (qsize / 5));
2366 if (ret)
2367 break;
2368 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2369 if (ret)
2370 break;
2371
2372 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2373 }
2374
2375 return ret;
2376 }
2377
2378 static void
2379 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2380 {
2381 int i;
2382
2383 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2384 nvme_fc_init_queue(ctrl, i);
2385 }
2386
2387 static void
2388 nvme_fc_ctrl_free(struct kref *ref)
2389 {
2390 struct nvme_fc_ctrl *ctrl =
2391 container_of(ref, struct nvme_fc_ctrl, ref);
2392 unsigned long flags;
2393
2394 if (ctrl->ctrl.tagset) {
2395 blk_mq_destroy_queue(ctrl->ctrl.connect_q);
2396 blk_mq_free_tag_set(&ctrl->tag_set);
2397 }
2398
2399
2400 spin_lock_irqsave(&ctrl->rport->lock, flags);
2401 list_del(&ctrl->ctrl_list);
2402 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2403
2404 nvme_start_admin_queue(&ctrl->ctrl);
2405 blk_mq_destroy_queue(ctrl->ctrl.admin_q);
2406 blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
2407 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2408
2409 kfree(ctrl->queues);
2410
2411 put_device(ctrl->dev);
2412 nvme_fc_rport_put(ctrl->rport);
2413
2414 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
2415 if (ctrl->ctrl.opts)
2416 nvmf_free_options(ctrl->ctrl.opts);
2417 kfree(ctrl);
2418 }
2419
2420 static void
2421 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2422 {
2423 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2424 }
2425
2426 static int
2427 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2428 {
2429 return kref_get_unless_zero(&ctrl->ref);
2430 }
2431
/*
 * All accesses from the nvme core layer are done; the controller can
 * now be freed. Called after the last nvme core reference is dropped.
 */
2436 static void
2437 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2438 {
2439 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2440
2441 WARN_ON(nctrl != &ctrl->ctrl);
2442
2443 nvme_fc_ctrl_put(ctrl);
2444 }
2445
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then
 * invoke this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates an FC exchange for each io, the transport must
 * contact the LLDD to terminate the exchange, thus releasing it. After
 * terminating the exchange the LLDD will call the transport's normal
 * io done path for the request, but with an aborted status. The done
 * path will return the io request back to the block layer with an
 * error status.
 */
2459 static bool nvme_fc_terminate_exchange(struct request *req, void *data)
2460 {
2461 struct nvme_ctrl *nctrl = data;
2462 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2463 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2464
2465 op->nreq.flags |= NVME_REQ_CANCELLED;
2466 __nvme_fc_abort_op(ctrl, op);
2467 return true;
2468 }
2469
/*
 * This routine runs through all outstanding commands on the association
 * and aborts them. It is typically called by the delete-association
 * path, and also on an error during reconnect: in that scenario, a
 * command that initializes the controller (including fabric Connect
 * commands on io queues) may have timed out or failed, and must be
 * killed for the connect thread to see the error.
 */
2479 static void
2480 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2481 {
2482 int q;
2483
	/*
	 * If io is being aborted, the queues are no longer good; mark
	 * them all as not live.
	 */
2488 if (ctrl->ctrl.queue_count > 1) {
2489 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2490 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2491 }
2492 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2493
	/*
	 * If io queues are present, stop them and terminate all
	 * outstanding ios on them. As FC allocates an FC exchange for
	 * each io, the LLDD must be contacted to terminate the exchange,
	 * thus releasing it. The terminated ios come back through the
	 * transport's normal io done path with an aborted status, and
	 * the done path returns them to the block layer.
	 */
2506 if (ctrl->ctrl.queue_count > 1) {
2507 nvme_stop_queues(&ctrl->ctrl);
2508 nvme_sync_io_queues(&ctrl->ctrl);
2509 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2510 nvme_fc_terminate_exchange, &ctrl->ctrl);
2511 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2512 if (start_queues)
2513 nvme_start_queues(&ctrl->ctrl);
2514 }
2515
	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given that a bunch of io was potentially
	 * just aborted and will be waited on, and given there is no
	 * indication of how live the controller is on the link, don't
	 * send more io to create more contexts for the shutdown. Let the
	 * controller fail via keep-alive failure if it's still present.
	 *
	 * Clean up the admin queue the same way as the io queues above.
	 */
2531 nvme_stop_admin_queue(&ctrl->ctrl);
2532 blk_sync_queue(ctrl->ctrl.admin_q);
2533 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2534 nvme_fc_terminate_exchange, &ctrl->ctrl);
2535 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2536 if (start_queues)
2537 nvme_start_admin_queue(&ctrl->ctrl);
2538 }
2539
2540 static void
2541 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2542 {
	/*
	 * If an error (io timeout, etc.) occurs while (re)connecting,
	 * the remote port may have requested termination of the
	 * association, or an io used to create the controller failed.
	 * Abort any outstanding ios on the association and let the
	 * create_association error path resolve things.
	 */
2550 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2551 __nvme_fc_abort_outstanding_ios(ctrl, true);
2552 set_bit(ASSOC_FAILED, &ctrl->flags);
2553 return;
2554 }
2555
	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
2557 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2558 return;
2559
2560 dev_warn(ctrl->ctrl.device,
2561 "NVME-FC{%d}: transport association event: %s\n",
2562 ctrl->cnum, errmsg);
2563 dev_warn(ctrl->ctrl.device,
2564 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2565
2566 nvme_reset_ctrl(&ctrl->ctrl);
2567 }
2568
2569 static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
2570 {
2571 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2572 struct nvme_fc_ctrl *ctrl = op->ctrl;
2573 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2574 struct nvme_command *sqe = &cmdiu->sqe;
2575
	/*
	 * Attempt to abort the offending command. Command completion
	 * will detect the aborted io and will fail the connection.
	 */
2580 dev_info(ctrl->ctrl.device,
2581 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2582 "x%08x/x%08x\n",
2583 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2584 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2585 if (__nvme_fc_abort_op(ctrl, op))
2586 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2587
	/*
	 * The io abort has been initiated. Restart the reset timer;
	 * the abort completion will resolve the io shortly.
	 */
2593 return BLK_EH_RESET_TIMER;
2594 }
2595
2596 static int
2597 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2598 struct nvme_fc_fcp_op *op)
2599 {
2600 struct nvmefc_fcp_req *freq = &op->fcp_req;
2601 int ret;
2602
2603 freq->sg_cnt = 0;
2604
2605 if (!blk_rq_nr_phys_segments(rq))
2606 return 0;
2607
2608 freq->sg_table.sgl = freq->first_sgl;
2609 ret = sg_alloc_table_chained(&freq->sg_table,
2610 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2611 NVME_INLINE_SG_CNT);
2612 if (ret)
2613 return -ENOMEM;
2614
2615 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2616 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2617 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2618 op->nents, rq_dma_dir(rq));
2619 if (unlikely(freq->sg_cnt <= 0)) {
2620 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2621 freq->sg_cnt = 0;
2622 return -EFAULT;
2623 }
2624
2625
2626
2627
2628 return 0;
2629 }
2630
2631 static void
2632 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2633 struct nvme_fc_fcp_op *op)
2634 {
2635 struct nvmefc_fcp_req *freq = &op->fcp_req;
2636
2637 if (!freq->sg_cnt)
2638 return;
2639
2640 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2641 rq_dma_dir(rq));
2642
2643 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2644
2645 freq->sg_cnt = 0;
2646 }
2647
/*
 * Routine validates and formats the FCP command IU, then hands it to
 * the LLDD for transmission.
 *
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ. When an
 * SQE is sent on an SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns an FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU on the exchange; all transfers relative to the io occur as
 * part of that exchange. The CQE is the last thing for the io: it is
 * transferred (explicitly or implicitly) with the RSP IU sent on the
 * exchange. After the CQE is received, the FC exchange is terminated
 * and may be reused for a different io.
 */
2671 static blk_status_t
2672 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2673 struct nvme_fc_fcp_op *op, u32 data_len,
2674 enum nvmefc_fcp_datadir io_dir)
2675 {
2676 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2677 struct nvme_command *sqe = &cmdiu->sqe;
2678 int ret, opstate;
2679
	/*
	 * Before attempting to send the io, check that the target
	 * device is believed to be present.
	 */
2684 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2685 return BLK_STS_RESOURCE;
2686
2687 if (!nvme_fc_ctrl_get(ctrl))
2688 return BLK_STS_IOERR;
2689
	/* format the FC-NVME CMD IU and fcp_req */
2691 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2692 cmdiu->data_len = cpu_to_be32(data_len);
2693 switch (io_dir) {
2694 case NVMEFC_FCP_WRITE:
2695 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2696 break;
2697 case NVMEFC_FCP_READ:
2698 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2699 break;
2700 case NVMEFC_FCP_NODATA:
2701 cmdiu->flags = 0;
2702 break;
2703 }
2704 op->fcp_req.payload_length = data_len;
2705 op->fcp_req.io_dir = io_dir;
2706 op->fcp_req.transferred_length = 0;
2707 op->fcp_req.rcv_rsplen = 0;
2708 op->fcp_req.status = NVME_SC_SUCCESS;
2709 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2710
	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
2715 WARN_ON_ONCE(sqe->common.metadata);
2716 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2717
	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
2725 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2726 NVME_SGL_FMT_TRANSPORT_A;
2727 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2728 sqe->rw.dptr.sgl.addr = 0;
2729
2730 if (!(op->flags & FCOP_FLAGS_AEN)) {
2731 ret = nvme_fc_map_data(ctrl, op->rq, op);
2732 if (ret < 0) {
2733 nvme_cleanup_cmd(op->rq);
2734 nvme_fc_ctrl_put(ctrl);
2735 if (ret == -ENOMEM || ret == -EAGAIN)
2736 return BLK_STS_RESOURCE;
2737 return BLK_STS_IOERR;
2738 }
2739 }
2740
2741 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2742 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2743
2744 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2745
2746 if (!(op->flags & FCOP_FLAGS_AEN))
2747 blk_mq_start_request(op->rq);
2748
2749 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2750 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2751 &ctrl->rport->remoteport,
2752 queue->lldd_handle, &op->fcp_req);
2753
2754 if (ret) {
		/*
		 * If the LLDD fails to send the command, a gap may be
		 * left in the csn sequence since the counter was already
		 * incremented. As best we read the FC-NVME spec, such a
		 * gap on a failed send is harmless, so just tear the op
		 * back down and report the error.
		 */
2767 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2768 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2769
2770 if (!(op->flags & FCOP_FLAGS_AEN)) {
2771 nvme_fc_unmap_data(ctrl, op->rq, op);
2772 nvme_cleanup_cmd(op->rq);
2773 }
2774
2775 nvme_fc_ctrl_put(ctrl);
2776
2777 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2778 ret != -EBUSY)
2779 return BLK_STS_IOERR;
2780
2781 return BLK_STS_RESOURCE;
2782 }
2783
2784 return BLK_STS_OK;
2785 }
2786
2787 static blk_status_t
2788 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2789 const struct blk_mq_queue_data *bd)
2790 {
2791 struct nvme_ns *ns = hctx->queue->queuedata;
2792 struct nvme_fc_queue *queue = hctx->driver_data;
2793 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2794 struct request *rq = bd->rq;
2795 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2796 enum nvmefc_fcp_datadir io_dir;
2797 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2798 u32 data_len;
2799 blk_status_t ret;
2800
2801 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2802 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2803 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2804
2805 ret = nvme_setup_cmd(ns, rq);
2806 if (ret)
2807 return ret;
2808
	/*
	 * nvme core doesn't quite treat the rq opaquely. Commands such
	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
	 * there is no actual payload to be transferred.
	 * To get it right, key data transmission on there being 1 or
	 * more physical segments in the sg list. If there are no
	 * physical segments, there is no payload.
	 */
2817 if (blk_rq_nr_phys_segments(rq)) {
2818 data_len = blk_rq_payload_bytes(rq);
2819 io_dir = ((rq_data_dir(rq) == WRITE) ?
2820 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2821 } else {
2822 data_len = 0;
2823 io_dir = NVMEFC_FCP_NODATA;
2824 }
2825
2826
2827 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2828 }
2829
2830 static void
2831 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2832 {
2833 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2834 struct nvme_fc_fcp_op *aen_op;
2835 blk_status_t ret;
2836
2837 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2838 return;
2839
2840 aen_op = &ctrl->aen_ops[0];
2841
2842 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2843 NVMEFC_FCP_NODATA);
2844 if (ret)
2845 dev_err(ctrl->ctrl.device,
2846 "failed async event work\n");
2847 }
2848
2849 static void
2850 nvme_fc_complete_rq(struct request *rq)
2851 {
2852 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2853 struct nvme_fc_ctrl *ctrl = op->ctrl;
2854
2855 atomic_set(&op->state, FCPOP_STATE_IDLE);
2856 op->flags &= ~FCOP_FLAGS_TERMIO;
2857
2858 nvme_fc_unmap_data(ctrl, rq, op);
2859 nvme_complete_rq(rq);
2860 nvme_fc_ctrl_put(ctrl);
2861 }
2862
2863 static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
2864 {
2865 struct nvme_fc_ctrl *ctrl = set->driver_data;
2866 int i;
2867
2868 for (i = 0; i < set->nr_maps; i++) {
2869 struct blk_mq_queue_map *map = &set->map[i];
2870
2871 if (!map->nr_queues) {
2872 WARN_ON(i == HCTX_TYPE_DEFAULT);
2873 continue;
2874 }
2875
		/* use the LLDD-supplied queue mapping if one is provided */
2877 if (ctrl->lport->ops->map_queues)
2878 ctrl->lport->ops->map_queues(&ctrl->lport->localport,
2879 map);
2880 else
2881 blk_mq_map_queues(map);
2882 }
2883 return 0;
2884 }
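
/*
 * Editor's sketch (hypothetical LLDD code, not part of this driver):
 * a PCI-based LLDD would typically implement the optional map_queues
 * template entry by steering blk-mq onto its MSI-X vectors. The
 * my_lldd type and pdev field are assumptions; localport->private is
 * assumed to be the LLDD's private area.
 *
 *	static void my_lldd_map_queues(struct nvme_fc_local_port *localport,
 *			struct blk_mq_queue_map *map)
 *	{
 *		struct my_lldd *lldd = localport->private;
 *
 *		// honor the IRQ affinity chosen at vector allocation time
 *		blk_mq_pci_map_queues(map, lldd->pdev, 0);
 *	}
 */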
2885
2886 static const struct blk_mq_ops nvme_fc_mq_ops = {
2887 .queue_rq = nvme_fc_queue_rq,
2888 .complete = nvme_fc_complete_rq,
2889 .init_request = nvme_fc_init_request,
2890 .exit_request = nvme_fc_exit_request,
2891 .init_hctx = nvme_fc_init_hctx,
2892 .timeout = nvme_fc_timeout,
2893 .map_queues = nvme_fc_map_queues,
2894 };
2895
2896 static int
2897 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2898 {
2899 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2900 unsigned int nr_io_queues;
2901 int ret;
2902
2903 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2904 ctrl->lport->ops->max_hw_queues);
2905 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2906 if (ret) {
2907 dev_info(ctrl->ctrl.device,
2908 "set_queue_count failed: %d\n", ret);
2909 return ret;
2910 }
2911
2912 ctrl->ctrl.queue_count = nr_io_queues + 1;
2913 if (!nr_io_queues)
2914 return 0;
2915
2916 nvme_fc_init_io_queues(ctrl);
2917
2918 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2919 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2920 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2921 ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
2922 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2923 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2924 ctrl->tag_set.cmd_size =
2925 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2926 ctrl->lport->ops->fcprqst_priv_sz);
2927 ctrl->tag_set.driver_data = ctrl;
2928 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2929 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2930
2931 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2932 if (ret)
2933 return ret;
2934
2935 ctrl->ctrl.tagset = &ctrl->tag_set;
2936
2937 ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
2938 if (ret)
2939 goto out_free_tag_set;
2940
2941 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2942 if (ret)
2943 goto out_cleanup_blk_queue;
2944
2945 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2946 if (ret)
2947 goto out_delete_hw_queues;
2948
2949 ctrl->ioq_live = true;
2950
2951 return 0;
2952
2953 out_delete_hw_queues:
2954 nvme_fc_delete_hw_io_queues(ctrl);
2955 out_cleanup_blk_queue:
2956 blk_mq_destroy_queue(ctrl->ctrl.connect_q);
2957 out_free_tag_set:
2958 blk_mq_free_tag_set(&ctrl->tag_set);
2959 nvme_fc_free_io_queues(ctrl);
2960
	/* force put free routine to ignore io queues */
2962 ctrl->ctrl.tagset = NULL;
2963
2964 return ret;
2965 }
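
/*
 * Editor's note, a worked example of the sizing above: with a
 * queue_size of 128, ctrl->ctrl.sqsize is 127 (NVMe SQ sizes are
 * zero-based), so the hardware queues and the fabric connects are
 * issued with sqsize + 1 = 128 entries, matching the blk-mq
 * queue_depth of opts->queue_size.
 */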
2966
2967 static int
2968 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2969 {
2970 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2971 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2972 unsigned int nr_io_queues;
2973 int ret;
2974
2975 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2976 ctrl->lport->ops->max_hw_queues);
2977 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2978 if (ret) {
2979 dev_info(ctrl->ctrl.device,
2980 "set_queue_count failed: %d\n", ret);
2981 return ret;
2982 }
2983
2984 if (!nr_io_queues && prior_ioq_cnt) {
2985 dev_info(ctrl->ctrl.device,
2986 "Fail Reconnect: At least 1 io queue "
2987 "required (was %d)\n", prior_ioq_cnt);
2988 return -ENOSPC;
2989 }
2990
2991 ctrl->ctrl.queue_count = nr_io_queues + 1;
2992
2993 if (ctrl->ctrl.queue_count == 1)
2994 return 0;
2995
2996 if (prior_ioq_cnt != nr_io_queues) {
2997 dev_info(ctrl->ctrl.device,
2998 "reconnect: revising io queue count from %d to %d\n",
2999 prior_ioq_cnt, nr_io_queues);
3000 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
3001 }
3002
3003 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
3004 if (ret)
3005 goto out_free_io_queues;
3006
3007 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
3008 if (ret)
3009 goto out_delete_hw_queues;
3010
3011 return 0;
3012
3013 out_delete_hw_queues:
3014 nvme_fc_delete_hw_io_queues(ctrl);
3015 out_free_io_queues:
3016 nvme_fc_free_io_queues(ctrl);
3017 return ret;
3018 }
3019
3020 static void
3021 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
3022 {
3023 struct nvme_fc_lport *lport = rport->lport;
3024
3025 atomic_inc(&lport->act_rport_cnt);
3026 }
3027
3028 static void
3029 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
3030 {
3031 struct nvme_fc_lport *lport = rport->lport;
3032 u32 cnt;
3033
3034 cnt = atomic_dec_return(&lport->act_rport_cnt);
3035 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
3036 lport->ops->localport_delete(&lport->localport);
3037 }
3038
3039 static int
3040 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3041 {
3042 struct nvme_fc_rport *rport = ctrl->rport;
3043 u32 cnt;
3044
3045 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3046 return 1;
3047
3048 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
3049 if (cnt == 1)
3050 nvme_fc_rport_active_on_lport(rport);
3051
3052 return 0;
3053 }
3054
3055 static int
3056 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3057 {
3058 struct nvme_fc_rport *rport = ctrl->rport;
3059 struct nvme_fc_lport *lport = rport->lport;
3060 u32 cnt;
3061
	/* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3064 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
3065 if (cnt == 0) {
3066 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
3067 lport->ops->remoteport_delete(&rport->remoteport);
3068 nvme_fc_rport_inactive_on_lport(rport);
3069 }
3070
3071 return 0;
3072 }
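
/*
 * Editor's note on the counting above: controller activity is counted
 * on the rport (act_ctrl_cnt) and rport activity on the lport
 * (act_rport_cnt). The last controller to go inactive on a DELETED
 * rport triggers remoteport_delete(), which may in turn drop the last
 * active rport on a DELETED lport and trigger localport_delete() -
 * giving the LLDD its two teardown callbacks in order.
 */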
3073
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
3078 static int
3079 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3080 {
3081 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3082 struct nvmefc_ls_rcv_op *disls = NULL;
3083 unsigned long flags;
3084 int ret;
3085 bool changed;
3086
3087 ++ctrl->ctrl.nr_reconnects;
3088
3089 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3090 return -ENODEV;
3091
3092 if (nvme_fc_ctlr_active_on_rport(ctrl))
3093 return -ENOTUNIQ;
3094
3095 dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: create association: host wwpn 0x%016llx "
		"rport wwpn 0x%016llx: NQN \"%s\"\n",
3098 ctrl->cnum, ctrl->lport->localport.port_name,
3099 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3100
3101 clear_bit(ASSOC_FAILED, &ctrl->flags);
3102
	/*
	 * Create the admin queue
	 */
3107 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3108 NVME_AQ_DEPTH);
3109 if (ret)
3110 goto out_free_queue;
3111
3112 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3113 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3114 if (ret)
3115 goto out_delete_hw_queue;
3116
3117 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3118 if (ret)
3119 goto out_disconnect_admin_queue;
3120
3121 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3122
	/*
	 * Check controller capabilities and enable the controller.
	 */
3130 ret = nvme_enable_ctrl(&ctrl->ctrl);
3131 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3132 goto out_disconnect_admin_queue;
3133
3134 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3135 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3136 (ilog2(SZ_4K) - 9);
3137
3138 nvme_start_admin_queue(&ctrl->ctrl);
3139
3140 ret = nvme_init_ctrl_finish(&ctrl->ctrl);
3141 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3142 goto out_disconnect_admin_queue;
3143
	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
3147 if (ctrl->ctrl.icdoff) {
3148 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3149 ctrl->ctrl.icdoff);
3150 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3151 goto out_disconnect_admin_queue;
3152 }
3153
	/* FC-NVME supports normal SGL Data Block Descriptors */
3155 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
3156 dev_err(ctrl->ctrl.device,
3157 "Mandatory sgls are not supported!\n");
3158 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3159 goto out_disconnect_admin_queue;
3160 }
3161
3162 if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
3164 dev_warn(ctrl->ctrl.device,
3165 "queue_size %zu > ctrl maxcmd %u, reducing "
3166 "to maxcmd\n",
3167 opts->queue_size, ctrl->ctrl.maxcmd);
3168 opts->queue_size = ctrl->ctrl.maxcmd;
3169 }
3170
3171 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
3173 dev_warn(ctrl->ctrl.device,
3174 "queue_size %zu > ctrl sqsize %u, reducing "
3175 "to sqsize\n",
3176 opts->queue_size, ctrl->ctrl.sqsize + 1);
3177 opts->queue_size = ctrl->ctrl.sqsize + 1;
3178 }
3179
3180 ret = nvme_fc_init_aen_ops(ctrl);
3181 if (ret)
3182 goto out_term_aen_ops;
3183
	/*
	 * Create the io queues
	 */
3188 if (ctrl->ctrl.queue_count > 1) {
3189 if (!ctrl->ioq_live)
3190 ret = nvme_fc_create_io_queues(ctrl);
3191 else
3192 ret = nvme_fc_recreate_io_queues(ctrl);
3193 }
3194 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3195 goto out_term_aen_ops;
3196
3197 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3198
3199 ctrl->ctrl.nr_reconnects = 0;
3200
3201 if (changed)
3202 nvme_start_ctrl(&ctrl->ctrl);
3203
3204 return 0;
3205
3206 out_term_aen_ops:
3207 nvme_fc_term_aen_ops(ctrl);
3208 out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
3210 nvme_fc_xmt_disconnect_assoc(ctrl);
3211 spin_lock_irqsave(&ctrl->lock, flags);
3212 ctrl->association_id = 0;
3213 disls = ctrl->rcv_disconn;
3214 ctrl->rcv_disconn = NULL;
3215 spin_unlock_irqrestore(&ctrl->lock, flags);
3216 if (disls)
3217 nvme_fc_xmt_ls_rsp(disls);
3218 out_delete_hw_queue:
3219 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3220 out_free_queue:
3221 nvme_fc_free_queue(&ctrl->queues[0]);
3222 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3223 nvme_fc_ctlr_inactive_on_rport(ctrl);
3224
3225 return ret;
3226 }
3227
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
3235 static void
3236 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3237 {
3238 struct nvmefc_ls_rcv_op *disls = NULL;
3239 unsigned long flags;
3240
3241 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3242 return;
3243
3244 spin_lock_irqsave(&ctrl->lock, flags);
3245 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3246 ctrl->iocnt = 0;
3247 spin_unlock_irqrestore(&ctrl->lock, flags);
3248
3249 __nvme_fc_abort_outstanding_ios(ctrl, false);
3250
	/* kill the aens as they are a separate path */
3252 nvme_fc_abort_aen_ops(ctrl);
3253
	/* wait for all io that had to be aborted */
3255 spin_lock_irq(&ctrl->lock);
3256 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3257 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3258 spin_unlock_irq(&ctrl->lock);
3259
3260 nvme_fc_term_aen_ops(ctrl);
3261
	/*
	 * send a Disconnect(association) LS to fc-nvme target.
	 * Note: this could have been done at the top of the teardown,
	 * but it's cleaner on link traffic to do it after the aborts
	 * complete. If the association no longer exists, association_id
	 * is 0 and the LS is skipped.
	 */
3268 if (ctrl->association_id)
3269 nvme_fc_xmt_disconnect_assoc(ctrl);
3270
3271 spin_lock_irqsave(&ctrl->lock, flags);
3272 ctrl->association_id = 0;
3273 disls = ctrl->rcv_disconn;
3274 ctrl->rcv_disconn = NULL;
3275 spin_unlock_irqrestore(&ctrl->lock, flags);
3276 if (disls)
		/*
		 * if a Disconnect Request was waiting for a response, send
		 * the response now
		 */
3281 nvme_fc_xmt_ls_rsp(disls);
3282
3283 if (ctrl->ctrl.tagset) {
3284 nvme_fc_delete_hw_io_queues(ctrl);
3285 nvme_fc_free_io_queues(ctrl);
3286 }
3287
3288 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3289 nvme_fc_free_queue(&ctrl->queues[0]);
3290
	/* re-enable the admin_q so anything new can fast fail */
3292 nvme_start_admin_queue(&ctrl->ctrl);
3293
	/* resume the io queues so that things will fast fail */
3295 nvme_start_queues(&ctrl->ctrl);
3296
3297 nvme_fc_ctlr_inactive_on_rport(ctrl);
3298 }
3299
3300 static void
3301 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3302 {
3303 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3304
3305 cancel_work_sync(&ctrl->ioerr_work);
3306 cancel_delayed_work_sync(&ctrl->connect_work);
3307
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
3311 nvme_fc_delete_association(ctrl);
3312 }
3313
3314 static void
3315 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3316 {
3317 struct nvme_fc_rport *rport = ctrl->rport;
3318 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3319 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3320 bool recon = true;
3321
3322 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3323 return;
3324
3325 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3326 dev_info(ctrl->ctrl.device,
3327 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3328 ctrl->cnum, status);
3329 if (status > 0 && (status & NVME_SC_DNR))
3330 recon = false;
3331 } else if (time_after_eq(jiffies, rport->dev_loss_end))
3332 recon = false;
3333
3334 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3335 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3336 dev_info(ctrl->ctrl.device,
3337 "NVME-FC{%d}: Reconnect attempt in %ld "
3338 "seconds\n",
3339 ctrl->cnum, recon_delay / HZ);
3340 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3341 recon_delay = rport->dev_loss_end - jiffies;
3342
3343 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3344 } else {
3345 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3346 if (status > 0 && (status & NVME_SC_DNR))
3347 dev_warn(ctrl->ctrl.device,
3348 "NVME-FC{%d}: reconnect failure\n",
3349 ctrl->cnum);
3350 else
3351 dev_warn(ctrl->ctrl.device,
3352 "NVME-FC{%d}: Max reconnect attempts "
3353 "(%d) reached.\n",
3354 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3355 } else
3356 dev_warn(ctrl->ctrl.device,
3357 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3358 "while waiting for remoteport connectivity.\n",
3359 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3360 (ctrl->ctrl.opts->max_reconnects *
3361 ctrl->ctrl.opts->reconnect_delay)));
3362 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3363 }
3364 }
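
/*
 * Editor's note, a worked example of the delay clamping above: while
 * the remote port is offline, the retry is trimmed so it fires before
 * the dev_loss window closes - with reconnect_delay = 10s but only 3s
 * left until rport->dev_loss_end, recon_delay becomes ~3s so the last
 * reconnect attempt still lands inside dev_loss_tmo.
 */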
3365
3366 static void
3367 nvme_fc_reset_ctrl_work(struct work_struct *work)
3368 {
3369 struct nvme_fc_ctrl *ctrl =
3370 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3371
3372 nvme_stop_ctrl(&ctrl->ctrl);
3373
	/* will block while waiting for io to terminate */
3375 nvme_fc_delete_association(ctrl);
3376
3377 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3378 dev_err(ctrl->ctrl.device,
3379 "NVME-FC{%d}: error_recovery: Couldn't change state "
3380 "to CONNECTING\n", ctrl->cnum);
3381
3382 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3383 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3384 dev_err(ctrl->ctrl.device,
3385 "NVME-FC{%d}: failed to schedule connect "
3386 "after reset\n", ctrl->cnum);
3387 } else {
3388 flush_delayed_work(&ctrl->connect_work);
3389 }
3390 } else {
3391 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3392 }
3393 }
3394
3395
3396 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3397 .name = "fc",
3398 .module = THIS_MODULE,
3399 .flags = NVME_F_FABRICS,
3400 .reg_read32 = nvmf_reg_read32,
3401 .reg_read64 = nvmf_reg_read64,
3402 .reg_write32 = nvmf_reg_write32,
3403 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3404 .submit_async_event = nvme_fc_submit_async_event,
3405 .delete_ctrl = nvme_fc_delete_ctrl,
3406 .get_address = nvmf_get_address,
3407 };
3408
3409 static void
3410 nvme_fc_connect_ctrl_work(struct work_struct *work)
3411 {
3412 int ret;
3413
3414 struct nvme_fc_ctrl *ctrl =
3415 container_of(to_delayed_work(work),
3416 struct nvme_fc_ctrl, connect_work);
3417
3418 ret = nvme_fc_create_association(ctrl);
3419 if (ret)
3420 nvme_fc_reconnect_or_delete(ctrl, ret);
3421 else
3422 dev_info(ctrl->ctrl.device,
3423 "NVME-FC{%d}: controller connect complete\n",
3424 ctrl->cnum);
3425 }
3426
3427
3428 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3429 .queue_rq = nvme_fc_queue_rq,
3430 .complete = nvme_fc_complete_rq,
3431 .init_request = nvme_fc_init_request,
3432 .exit_request = nvme_fc_exit_request,
3433 .init_hctx = nvme_fc_init_admin_hctx,
3434 .timeout = nvme_fc_timeout,
3435 };
3436
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
3446 static bool
3447 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3448 struct nvmf_ctrl_options *opts)
3449 {
3450 struct nvme_fc_ctrl *ctrl;
3451 unsigned long flags;
3452 bool found = false;
3453
3454 spin_lock_irqsave(&rport->lock, flags);
3455 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3456 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3457 if (found)
3458 break;
3459 }
3460 spin_unlock_irqrestore(&rport->lock, flags);
3461
3462 return found;
3463 }
3464
3465 static struct nvme_ctrl *
3466 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3467 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3468 {
3469 struct nvme_fc_ctrl *ctrl;
3470 unsigned long flags;
3471 int ret, idx, ctrl_loss_tmo;
3472
3473 if (!(rport->remoteport.port_role &
3474 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3475 ret = -EBADR;
3476 goto out_fail;
3477 }
3478
3479 if (!opts->duplicate_connect &&
3480 nvme_fc_existing_controller(rport, opts)) {
3481 ret = -EALREADY;
3482 goto out_fail;
3483 }
3484
3485 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3486 if (!ctrl) {
3487 ret = -ENOMEM;
3488 goto out_fail;
3489 }
3490
3491 idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
3492 if (idx < 0) {
3493 ret = -ENOSPC;
3494 goto out_free_ctrl;
3495 }
3496
	/*
	 * if ctrl_loss_tmo is being enforced and the default reconnect
	 * delay is being used, change to a shorter reconnect delay for FC.
	 */
3501 if (opts->max_reconnects != -1 &&
3502 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
3503 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
3504 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
3505 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
3506 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3507 opts->reconnect_delay);
3508 }
3509
3510 ctrl->ctrl.opts = opts;
3511 ctrl->ctrl.nr_reconnects = 0;
3512 if (lport->dev)
3513 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3514 else
3515 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3516 INIT_LIST_HEAD(&ctrl->ctrl_list);
3517 ctrl->lport = lport;
3518 ctrl->rport = rport;
3519 ctrl->dev = lport->dev;
3520 ctrl->cnum = idx;
3521 ctrl->ioq_live = false;
3522 init_waitqueue_head(&ctrl->ioabort_wait);
3523
3524 get_device(ctrl->dev);
3525 kref_init(&ctrl->ref);
3526
3527 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3528 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3529 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3530 spin_lock_init(&ctrl->lock);
3531
3532
3533 ctrl->ctrl.queue_count = min_t(unsigned int,
3534 opts->nr_io_queues,
3535 lport->ops->max_hw_queues);
3536 ctrl->ctrl.queue_count++;
3537
3538 ctrl->ctrl.sqsize = opts->queue_size - 1;
3539 ctrl->ctrl.kato = opts->kato;
3540 ctrl->ctrl.cntlid = 0xffff;
3541
3542 ret = -ENOMEM;
3543 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3544 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3545 if (!ctrl->queues)
3546 goto out_free_ida;
3547
3548 nvme_fc_init_queue(ctrl, 0);
3549
3550 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3551 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3552 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3553 ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
3554 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3555 ctrl->admin_tag_set.cmd_size =
3556 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3557 ctrl->lport->ops->fcprqst_priv_sz);
3558 ctrl->admin_tag_set.driver_data = ctrl;
3559 ctrl->admin_tag_set.nr_hw_queues = 1;
3560 ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
3561 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3562
3563 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3564 if (ret)
3565 goto out_free_queues;
3566 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3567
3568 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3569 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3570 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3571 goto out_free_admin_tag_set;
3572 }
3573
3574 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3575 if (IS_ERR(ctrl->ctrl.admin_q)) {
3576 ret = PTR_ERR(ctrl->ctrl.admin_q);
3577 goto out_cleanup_fabrics_q;
3578 }
3579
	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */
3587 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3588 if (ret)
3589 goto out_cleanup_admin_q;
3590
	/* at this point, teardown path changes to ref counting on nvme ctrl */
3593 spin_lock_irqsave(&rport->lock, flags);
3594 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3595 spin_unlock_irqrestore(&rport->lock, flags);
3596
3597 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3598 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3599 dev_err(ctrl->ctrl.device,
3600 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3601 goto fail_ctrl;
3602 }
3603
3604 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3605 dev_err(ctrl->ctrl.device,
3606 "NVME-FC{%d}: failed to schedule initial connect\n",
3607 ctrl->cnum);
3608 goto fail_ctrl;
3609 }
3610
3611 flush_delayed_work(&ctrl->connect_work);
3612
3613 dev_info(ctrl->ctrl.device,
3614 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3615 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
3616
3617 return &ctrl->ctrl;
3618
3619 fail_ctrl:
3620 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3621 cancel_work_sync(&ctrl->ioerr_work);
3622 cancel_work_sync(&ctrl->ctrl.reset_work);
3623 cancel_delayed_work_sync(&ctrl->connect_work);
3624
3625 ctrl->ctrl.opts = NULL;
3626
	/* initiate nvme ctrl ref counting teardown */
3628 nvme_uninit_ctrl(&ctrl->ctrl);
3629
	/* Remove core ctrl ref. */
3631 nvme_put_ctrl(&ctrl->ctrl);
3632
	/*
	 * As we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the transition,
	 * will do an rport put. Since the teardown path also does an
	 * rport put, do an extra get here so the order of teardown
	 * stays correct.
	 */
3640 nvme_fc_rport_get(rport);
3641
3642 return ERR_PTR(-EIO);
3643
3644 out_cleanup_admin_q:
3645 blk_mq_destroy_queue(ctrl->ctrl.admin_q);
3646 out_cleanup_fabrics_q:
3647 blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
3648 out_free_admin_tag_set:
3649 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3650 out_free_queues:
3651 kfree(ctrl->queues);
3652 out_free_ida:
3653 put_device(ctrl->dev);
3654 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
3655 out_free_ctrl:
3656 kfree(ctrl);
3657 out_fail:
	/* exit via here doesn't follow ctlr ref points */
3659 return ERR_PTR(ret);
3660 }
3661
3662
3663 struct nvmet_fc_traddr {
3664 u64 nn;
3665 u64 pn;
3666 };
3667
3668 static int
3669 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3670 {
3671 u64 token64;
3672
3673 if (match_u64(sstr, &token64))
3674 return -EINVAL;
3675 *val = token64;
3676
3677 return 0;
3678 }
3679
/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build the string to parse with a 0x prefix before parsing name strings.
 */
3685 static int
3686 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3687 {
3688 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3689 substring_t wwn = { name, &name[sizeof(name)-1] };
3690 int nnoffset, pnoffset;
3691
	/* validate if string is one of the 2 allowed formats */
3693 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3694 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3695 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3696 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3697 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3698 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3699 NVME_FC_TRADDR_OXNNLEN;
3700 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3701 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3702 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3703 "pn-", NVME_FC_TRADDR_NNLEN))) {
3704 nnoffset = NVME_FC_TRADDR_NNLEN;
3705 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3706 } else
3707 goto out_einval;
3708
3709 name[0] = '0';
3710 name[1] = 'x';
3711 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3712
3713 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3714 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3715 goto out_einval;
3716
3717 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3718 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3719 goto out_einval;
3720
3721 return 0;
3722
3723 out_einval:
3724 pr_warn("%s: bad traddr string\n", __func__);
3725 return -EINVAL;
3726 }
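
/*
 * Editor's note: per the checks above, the two accepted traddr forms
 * are the fixed-length "0x" form and the minimal form, e.g.:
 *
 *	nn-0x20000090fac7e5cc:pn-0x10000090fac7e5cc
 *	nn-20000090fac7e5cc:pn-10000090fac7e5cc
 *
 * Either way each WWN is exactly NVME_FC_TRADDR_HEXNAMELEN (16) hex
 * digits; the names are re-prefixed with "0x" above so match_u64()
 * parses them as base 16.
 */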
3727
3728 static struct nvme_ctrl *
3729 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3730 {
3731 struct nvme_fc_lport *lport;
3732 struct nvme_fc_rport *rport;
3733 struct nvme_ctrl *ctrl;
3734 struct nvmet_fc_traddr laddr = { 0L, 0L };
3735 struct nvmet_fc_traddr raddr = { 0L, 0L };
3736 unsigned long flags;
3737 int ret;
3738
3739 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3740 if (ret || !raddr.nn || !raddr.pn)
3741 return ERR_PTR(-EINVAL);
3742
3743 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3744 if (ret || !laddr.nn || !laddr.pn)
3745 return ERR_PTR(-EINVAL);
3746
	/* find the host and remote ports to connect together */
3748 spin_lock_irqsave(&nvme_fc_lock, flags);
3749 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3750 if (lport->localport.node_name != laddr.nn ||
3751 lport->localport.port_name != laddr.pn ||
3752 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3753 continue;
3754
3755 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3756 if (rport->remoteport.node_name != raddr.nn ||
3757 rport->remoteport.port_name != raddr.pn ||
3758 rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3759 continue;
			/* if fail to get reference fall through. Will error */
3762 if (!nvme_fc_rport_get(rport))
3763 break;
3764
3765 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3766
3767 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3768 if (IS_ERR(ctrl))
3769 nvme_fc_rport_put(rport);
3770 return ctrl;
3771 }
3772 }
3773 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3774
3775 pr_warn("%s: %s - %s combination not found\n",
3776 __func__, opts->traddr, opts->host_traddr);
3777 return ERR_PTR(-ENOENT);
3778 }
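
/*
 * Editor's sketch of typical usage from userspace (nvme-cli option
 * spellings assumed; check your cli version):
 *
 *	nvme connect --transport=fc \
 *		--traddr="nn-0x20000090fac7e5cc:pn-0x10000090fac7e5cc" \
 *		--host-traddr="nn-0x20000090fa945a2f:pn-0x10000090fa945a2f" \
 *		--nqn=<subsystem NQN>
 *
 * which reaches nvme_fc_create_ctrl() with traddr/host_traddr already
 * split into opts by the fabrics option parser.
 */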
3779
3780
3781 static struct nvmf_transport_ops nvme_fc_transport = {
3782 .name = "fc",
3783 .module = THIS_MODULE,
3784 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3785 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3786 .create_ctrl = nvme_fc_create_ctrl,
3787 };
3788
/* Arbitrary successive failures max. With lots of subsystems could be high */
3790 #define DISCOVERY_MAX_FAIL 20
3791
3792 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3793 struct device_attribute *attr, const char *buf, size_t count)
3794 {
3795 unsigned long flags;
3796 LIST_HEAD(local_disc_list);
3797 struct nvme_fc_lport *lport;
3798 struct nvme_fc_rport *rport;
3799 int failcnt = 0;
3800
3801 spin_lock_irqsave(&nvme_fc_lock, flags);
3802 restart:
3803 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3804 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3805 if (!nvme_fc_lport_get(lport))
3806 continue;
3807 if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * already added to the local list will be
				 * skipped (it is no longer list_empty), so
				 * the loops resume at rports not yet seen.
				 */
3817 nvme_fc_lport_put(lport);
3818
3819 if (failcnt++ < DISCOVERY_MAX_FAIL)
3820 goto restart;
3821
3822 pr_err("nvme_discovery: too many reference "
3823 "failures\n");
3824 goto process_local_list;
3825 }
3826 if (list_empty(&rport->disc_list))
3827 list_add_tail(&rport->disc_list,
3828 &local_disc_list);
3829 }
3830 }
3831
3832 process_local_list:
3833 while (!list_empty(&local_disc_list)) {
3834 rport = list_first_entry(&local_disc_list,
3835 struct nvme_fc_rport, disc_list);
3836 list_del_init(&rport->disc_list);
3837 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3838
3839 lport = rport->lport;
3840
3841 nvme_fc_signal_discovery_scan(lport, rport);
3842 nvme_fc_rport_put(rport);
3843 nvme_fc_lport_put(lport);
3844
3845 spin_lock_irqsave(&nvme_fc_lock, flags);
3846 }
3847 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3848
3849 return count;
3850 }
3851
3852 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
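
/*
 * Editor's note (sysfs path assumed from the fc_class/fc_udev_device
 * registration below): any write to the attribute rescans every known
 * lport/rport pair, e.g.:
 *
 *	echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 */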
3853
3854 #ifdef CONFIG_BLK_CGROUP_FC_APPID
3855
3856 static int fc_parse_cgrpid(const char *buf, u64 *id)
3857 {
3858 char cgrp_id[16+1];
3859 int cgrpid_len, j;
3860
3861 memset(cgrp_id, 0x0, sizeof(cgrp_id));
3862 for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
3863 if (buf[cgrpid_len] != ':')
3864 cgrp_id[cgrpid_len] = buf[cgrpid_len];
3865 else {
3866 j = 1;
3867 break;
3868 }
3869 }
3870 if (!j)
3871 return -EINVAL;
3872 if (kstrtou64(cgrp_id, 16, id) < 0)
3873 return -EINVAL;
3874 return cgrpid_len;
3875 }
3876
/*
 * Parse and update the appid in the blkcg associated with the cgroupid.
 * @buf: contains both the cgrpid and appid info
 * @count: size of the buffer
 */
3880 static ssize_t fc_appid_store(struct device *dev,
3881 struct device_attribute *attr, const char *buf, size_t count)
3882 {
3883 size_t orig_count = count;
3884 u64 cgrp_id;
3885 int appid_len = 0;
3886 int cgrpid_len = 0;
3887 char app_id[FC_APPID_LEN];
3888 int ret = 0;
3889
3890 if (buf[count-1] == '\n')
3891 count--;
3892
3893 if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
3894 return -EINVAL;
3895
3896 cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
3897 if (cgrpid_len < 0)
3898 return -EINVAL;
3899 appid_len = count - cgrpid_len - 1;
3900 if (appid_len > FC_APPID_LEN)
3901 return -EINVAL;
3902
3903 memset(app_id, 0x0, sizeof(app_id));
3904 memcpy(app_id, &buf[cgrpid_len+1], appid_len);
3905 ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
3906 if (ret < 0)
3907 return ret;
3908 return orig_count;
3909 }
3910 static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
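
/*
 * Editor's note: the store above expects "<cgroupid>:<appid>", with the
 * cgroup id in hex (up to 16 digits) and the appid at most FC_APPID_LEN
 * bytes, e.g. (path assumed):
 *
 *	echo "2f3c0a:backup_job" > /sys/class/fc/fc_udev_device/appid_store
 */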
3911 #endif
3912
3913 static struct attribute *nvme_fc_attrs[] = {
3914 &dev_attr_nvme_discovery.attr,
3915 #ifdef CONFIG_BLK_CGROUP_FC_APPID
3916 &dev_attr_appid_store.attr,
3917 #endif
3918 NULL
3919 };
3920
3921 static const struct attribute_group nvme_fc_attr_group = {
3922 .attrs = nvme_fc_attrs,
3923 };
3924
3925 static const struct attribute_group *nvme_fc_attr_groups[] = {
3926 &nvme_fc_attr_group,
3927 NULL
3928 };
3929
3930 static struct class fc_class = {
3931 .name = "fc",
3932 .dev_groups = nvme_fc_attr_groups,
3933 .owner = THIS_MODULE,
3934 };
3935
3936 static int __init nvme_fc_init_module(void)
3937 {
3938 int ret;
3939
3940 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3941 if (!nvme_fc_wq)
3942 return -ENOMEM;
3943
	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and nvme into a
	 * single fc transport, which would then properly own the fc
	 * class, udev devices, etc.
	 *
	 * For now, create the class here so the "fc_udev_device" below
	 * has a home for its udev events and sysfs attributes.
	 */
3958 ret = class_register(&fc_class);
3959 if (ret) {
3960 pr_err("couldn't register class fc\n");
3961 goto out_destroy_wq;
3962 }
3963
	/*
	 * Create a device for the FC-centric udev events
	 */
3967 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3968 "fc_udev_device");
3969 if (IS_ERR(fc_udev_device)) {
3970 pr_err("couldn't create fc_udev device!\n");
3971 ret = PTR_ERR(fc_udev_device);
3972 goto out_destroy_class;
3973 }
3974
3975 ret = nvmf_register_transport(&nvme_fc_transport);
3976 if (ret)
3977 goto out_destroy_device;
3978
3979 return 0;
3980
3981 out_destroy_device:
3982 device_destroy(&fc_class, MKDEV(0, 0));
3983 out_destroy_class:
3984 class_unregister(&fc_class);
3985 out_destroy_wq:
3986 destroy_workqueue(nvme_fc_wq);
3987
3988 return ret;
3989 }
3990
3991 static void
3992 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3993 {
3994 struct nvme_fc_ctrl *ctrl;
3995
3996 spin_lock(&rport->lock);
3997 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3998 dev_warn(ctrl->ctrl.device,
3999 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
4000 ctrl->cnum);
4001 nvme_delete_ctrl(&ctrl->ctrl);
4002 }
4003 spin_unlock(&rport->lock);
4004 }
4005
4006 static void
4007 nvme_fc_cleanup_for_unload(void)
4008 {
4009 struct nvme_fc_lport *lport;
4010 struct nvme_fc_rport *rport;
4011
4012 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
4013 list_for_each_entry(rport, &lport->endp_list, endp_list) {
4014 nvme_fc_delete_controllers(rport);
4015 }
4016 }
4017 }
4018
4019 static void __exit nvme_fc_exit_module(void)
4020 {
4021 unsigned long flags;
4022 bool need_cleanup = false;
4023
4024 spin_lock_irqsave(&nvme_fc_lock, flags);
4025 nvme_fc_waiting_to_unload = true;
4026 if (!list_empty(&nvme_fc_lport_list)) {
4027 need_cleanup = true;
4028 nvme_fc_cleanup_for_unload();
4029 }
4030 spin_unlock_irqrestore(&nvme_fc_lock, flags);
4031 if (need_cleanup) {
		pr_info("%s: waiting for ctrl deletes\n", __func__);
4033 wait_for_completion(&nvme_fc_unload_proceed);
4034 pr_info("%s: ctrl deletes complete\n", __func__);
4035 }
4036
4037 nvmf_unregister_transport(&nvme_fc_transport);
4038
4039 ida_destroy(&nvme_fc_local_port_cnt);
4040 ida_destroy(&nvme_fc_ctrl_cnt);
4041
4042 device_destroy(&fc_class, MKDEV(0, 0));
4043 class_unregister(&fc_class);
4044 destroy_workqueue(nvme_fc_wq);
4045 }
4046
4047 module_init(nvme_fc_init_module);
4048 module_exit(nvme_fc_exit_module);
4049
4050 MODULE_LICENSE("GPL v2");