// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007 #include <linux/module.h>
0008 #include <linux/random.h>
0009 #include <linux/rculist.h>
0010 #include <linux/pci-p2pdma.h>
0011 #include <linux/scatterlist.h>
0012
0013 #define CREATE_TRACE_POINTS
0014 #include "trace.h"
0015
0016 #include "nvmet.h"
0017
0018 struct workqueue_struct *buffered_io_wq;
0019 struct workqueue_struct *zbd_wq;
0020 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
0021 static DEFINE_IDA(cntlid_ida);
0022
0023 struct workqueue_struct *nvmet_wq;
0024 EXPORT_SYMBOL_GPL(nvmet_wq);
0025
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 *
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, otherwise the read lock should be obtained.
 */
0042 DECLARE_RWSEM(nvmet_config_sem);
0043
0044 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
0045 u64 nvmet_ana_chgcnt;
0046 DECLARE_RWSEM(nvmet_ana_sem);
0047
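/*
 * Translate a negative errno from a backend into an NVMe status code and
 * record the offending command field in req->error_loc.  Unrecognized errnos
 * fall back to an internal error with the Do Not Retry bit set.
 */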
0048 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
0049 {
0050 switch (errno) {
0051 case 0:
0052 return NVME_SC_SUCCESS;
0053 case -ENOSPC:
0054 req->error_loc = offsetof(struct nvme_rw_command, length);
0055 return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
0056 case -EREMOTEIO:
0057 req->error_loc = offsetof(struct nvme_rw_command, slba);
0058 return NVME_SC_LBA_RANGE | NVME_SC_DNR;
0059 case -EOPNOTSUPP:
0060 req->error_loc = offsetof(struct nvme_common_command, opcode);
0061 switch (req->cmd->common.opcode) {
0062 case nvme_cmd_dsm:
0063 case nvme_cmd_write_zeroes:
0064 return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
0065 default:
0066 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
0067 }
0068 break;
0069 case -ENODATA:
0070 req->error_loc = offsetof(struct nvme_rw_command, nsid);
0071 return NVME_SC_ACCESS_DENIED;
0072 case -EIO:
0073 fallthrough;
0074 default:
0075 req->error_loc = offsetof(struct nvme_common_command, opcode);
0076 return NVME_SC_INTERNAL | NVME_SC_DNR;
0077 }
0078 }
0079
0080 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
0081 {
0082 pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
0083 req->sq->qid);
0084
0085 req->error_loc = offsetof(struct nvme_common_command, opcode);
0086 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
0087 }
0088
0089 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
0090 const char *subsysnqn);
0091
0092 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
0093 size_t len)
0094 {
0095 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
0096 req->error_loc = offsetof(struct nvme_common_command, dptr);
0097 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
0098 }
0099 return 0;
0100 }
0101
0102 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
0103 {
0104 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
0105 req->error_loc = offsetof(struct nvme_common_command, dptr);
0106 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
0107 }
0108 return 0;
0109 }
0110
0111 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
0112 {
0113 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
0114 req->error_loc = offsetof(struct nvme_common_command, dptr);
0115 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
0116 }
0117 return 0;
0118 }
0119
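/* Return the highest NSID in the subsystem (xa_for_each iterates in ascending index order). */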
0120 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
0121 {
0122 struct nvmet_ns *cur;
0123 unsigned long idx;
0124 u32 nsid = 0;
0125
0126 xa_for_each(&subsys->namespaces, idx, cur)
0127 nsid = cur->nsid;
0128
0129 return nsid;
0130 }
0131
0132 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
0133 {
0134 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
0135 }
0136
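/*
 * Complete all outstanding Asynchronous Event Request commands with an
 * internal error status; used when the admin queue is being torn down.
 */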
0137 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
0138 {
0139 struct nvmet_req *req;
0140
0141 mutex_lock(&ctrl->lock);
0142 while (ctrl->nr_async_event_cmds) {
0143 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
0144 mutex_unlock(&ctrl->lock);
0145 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
0146 mutex_lock(&ctrl->lock);
0147 }
0148 mutex_unlock(&ctrl->lock);
0149 }
0150
0151 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
0152 {
0153 struct nvmet_async_event *aen;
0154 struct nvmet_req *req;
0155
0156 mutex_lock(&ctrl->lock);
0157 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
0158 aen = list_first_entry(&ctrl->async_events,
0159 struct nvmet_async_event, entry);
0160 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
0161 nvmet_set_result(req, nvmet_async_event_result(aen));
0162
0163 list_del(&aen->entry);
0164 kfree(aen);
0165
0166 mutex_unlock(&ctrl->lock);
0167 trace_nvmet_async_event(ctrl, req->cqe->result.u32);
0168 nvmet_req_complete(req, 0);
0169 mutex_lock(&ctrl->lock);
0170 }
0171 mutex_unlock(&ctrl->lock);
0172 }
0173
0174 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
0175 {
0176 struct nvmet_async_event *aen, *tmp;
0177
0178 mutex_lock(&ctrl->lock);
0179 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
0180 list_del(&aen->entry);
0181 kfree(aen);
0182 }
0183 mutex_unlock(&ctrl->lock);
0184 }
0185
0186 static void nvmet_async_event_work(struct work_struct *work)
0187 {
0188 struct nvmet_ctrl *ctrl =
0189 container_of(work, struct nvmet_ctrl, async_event_work);
0190
0191 nvmet_async_events_process(ctrl);
0192 }
0193
0194 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
0195 u8 event_info, u8 log_page)
0196 {
0197 struct nvmet_async_event *aen;
0198
0199 aen = kmalloc(sizeof(*aen), GFP_KERNEL);
0200 if (!aen)
0201 return;
0202
0203 aen->event_type = event_type;
0204 aen->event_info = event_info;
0205 aen->log_page = log_page;
0206
0207 mutex_lock(&ctrl->lock);
0208 list_add_tail(&aen->entry, &ctrl->async_events);
0209 mutex_unlock(&ctrl->lock);
0210
0211 queue_work(nvmet_wq, &ctrl->async_event_work);
0212 }
0213
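/*
 * Record a namespace ID in the controller's Changed Namespace List log page.
 * On overflow the list is collapsed to the single entry 0xffffffff, as the
 * Changed Namespace List log defines for more than NVME_MAX_CHANGED_NAMESPACES
 * entries.
 */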
0214 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
0215 {
0216 u32 i;
0217
0218 mutex_lock(&ctrl->lock);
0219 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
0220 goto out_unlock;
0221
0222 for (i = 0; i < ctrl->nr_changed_ns; i++) {
0223 if (ctrl->changed_ns_list[i] == nsid)
0224 goto out_unlock;
0225 }
0226
0227 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
0228 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
0229 ctrl->nr_changed_ns = U32_MAX;
0230 goto out_unlock;
0231 }
0232
0233 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
0234 out_unlock:
0235 mutex_unlock(&ctrl->lock);
0236 }
0237
0238 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
0239 {
0240 struct nvmet_ctrl *ctrl;
0241
0242 lockdep_assert_held(&subsys->lock);
0243
0244 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
0245 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
0246 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
0247 continue;
0248 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
0249 NVME_AER_NOTICE_NS_CHANGED,
0250 NVME_LOG_CHANGED_NS);
0251 }
0252 }
0253
0254 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
0255 struct nvmet_port *port)
0256 {
0257 struct nvmet_ctrl *ctrl;
0258
0259 mutex_lock(&subsys->lock);
0260 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
0261 if (port && ctrl->port != port)
0262 continue;
0263 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
0264 continue;
0265 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
0266 NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
0267 }
0268 mutex_unlock(&subsys->lock);
0269 }
0270
0271 void nvmet_port_send_ana_event(struct nvmet_port *port)
0272 {
0273 struct nvmet_subsys_link *p;
0274
0275 down_read(&nvmet_config_sem);
0276 list_for_each_entry(p, &port->subsystems, entry)
0277 nvmet_send_ana_event(p->subsys, port);
0278 up_read(&nvmet_config_sem);
0279 }
0280
0281 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
0282 {
0283 int ret = 0;
0284
0285 down_write(&nvmet_config_sem);
0286 if (nvmet_transports[ops->type])
0287 ret = -EINVAL;
0288 else
0289 nvmet_transports[ops->type] = ops;
0290 up_write(&nvmet_config_sem);
0291
0292 return ret;
0293 }
0294 EXPORT_SYMBOL_GPL(nvmet_register_transport);
0295
0296 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
0297 {
0298 down_write(&nvmet_config_sem);
0299 nvmet_transports[ops->type] = NULL;
0300 up_write(&nvmet_config_sem);
0301 }
0302 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
0303
0304 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
0305 {
0306 struct nvmet_ctrl *ctrl;
0307
0308 mutex_lock(&subsys->lock);
0309 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
0310 if (ctrl->port == port)
0311 ctrl->ops->delete_ctrl(ctrl);
0312 }
0313 mutex_unlock(&subsys->lock);
0314 }
0315
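/*
 * Enable a port under nvmet_config_sem: load and pin the transport module for
 * the configured transport type if needed, then ask the transport to start
 * listening on the port's discovery address.
 */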
0316 int nvmet_enable_port(struct nvmet_port *port)
0317 {
0318 const struct nvmet_fabrics_ops *ops;
0319 int ret;
0320
0321 lockdep_assert_held(&nvmet_config_sem);
0322
0323 ops = nvmet_transports[port->disc_addr.trtype];
0324 if (!ops) {
0325 up_write(&nvmet_config_sem);
0326 request_module("nvmet-transport-%d", port->disc_addr.trtype);
0327 down_write(&nvmet_config_sem);
0328 ops = nvmet_transports[port->disc_addr.trtype];
0329 if (!ops) {
0330 pr_err("transport type %d not supported\n",
0331 port->disc_addr.trtype);
0332 return -EINVAL;
0333 }
0334 }
0335
0336 if (!try_module_get(ops->owner))
0337 return -EINVAL;
0338
/*
 * If the user requested PI support and the transport isn't PI capable,
 * don't enable the port.
 */
0343 if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
0344 pr_err("T10-PI is not supported by transport type %d\n",
0345 port->disc_addr.trtype);
0346 ret = -EINVAL;
0347 goto out_put;
0348 }
0349
0350 ret = ops->add_port(port);
0351 if (ret)
0352 goto out_put;
0353
/* If the transport didn't set inline_data_size, then disable it. */
0355 if (port->inline_data_size < 0)
0356 port->inline_data_size = 0;
0357
0358 port->enabled = true;
0359 port->tr_ops = ops;
0360 return 0;
0361
0362 out_put:
0363 module_put(ops->owner);
0364 return ret;
0365 }
0366
0367 void nvmet_disable_port(struct nvmet_port *port)
0368 {
0369 const struct nvmet_fabrics_ops *ops;
0370
0371 lockdep_assert_held(&nvmet_config_sem);
0372
0373 port->enabled = false;
0374 port->tr_ops = NULL;
0375
0376 ops = nvmet_transports[port->disc_addr.trtype];
0377 ops->remove_port(port);
0378 module_put(ops->owner);
0379 }
0380
0381 static void nvmet_keep_alive_timer(struct work_struct *work)
0382 {
0383 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
0384 struct nvmet_ctrl, ka_work);
0385 bool reset_tbkas = ctrl->reset_tbkas;
0386
0387 ctrl->reset_tbkas = false;
0388 if (reset_tbkas) {
0389 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
0390 ctrl->cntlid);
0391 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
0392 return;
0393 }
0394
0395 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
0396 ctrl->cntlid, ctrl->kato);
0397
0398 nvmet_ctrl_fatal_error(ctrl);
0399 }
0400
0401 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
0402 {
0403 if (unlikely(ctrl->kato == 0))
0404 return;
0405
0406 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
0407 ctrl->cntlid, ctrl->kato);
0408
0409 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
0410 }
0411
0412 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
0413 {
0414 if (unlikely(ctrl->kato == 0))
0415 return;
0416
0417 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
0418
0419 cancel_delayed_work_sync(&ctrl->ka_work);
0420 }
0421
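/*
 * Look up the namespace addressed by the command's NSID and take a reference
 * on it; the reference is dropped when the request completes (or in
 * nvmet_req_uninit()).
 */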
0422 u16 nvmet_req_find_ns(struct nvmet_req *req)
0423 {
0424 u32 nsid = le32_to_cpu(req->cmd->common.nsid);
0425
0426 req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
0427 if (unlikely(!req->ns)) {
0428 req->error_loc = offsetof(struct nvme_common_command, nsid);
0429 return NVME_SC_INVALID_NS | NVME_SC_DNR;
0430 }
0431
0432 percpu_ref_get(&req->ns->ref);
0433 return NVME_SC_SUCCESS;
0434 }
0435
0436 static void nvmet_destroy_namespace(struct percpu_ref *ref)
0437 {
0438 struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
0439
0440 complete(&ns->disable_done);
0441 }
0442
0443 void nvmet_put_namespace(struct nvmet_ns *ns)
0444 {
0445 percpu_ref_put(&ns->ref);
0446 }
0447
0448 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
0449 {
0450 nvmet_bdev_ns_disable(ns);
0451 nvmet_file_ns_disable(ns);
0452 }
0453
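/*
 * Verify that peer-to-peer DMA can be used with this namespace: it must be
 * backed by a block device whose driver supports P2P DMA, and usable p2pmem
 * must be reachable from that device.
 */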
0454 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
0455 {
0456 int ret;
0457 struct pci_dev *p2p_dev;
0458
0459 if (!ns->use_p2pmem)
0460 return 0;
0461
0462 if (!ns->bdev) {
0463 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
0464 return -EINVAL;
0465 }
0466
0467 if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
0468 pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
0469 ns->device_path);
0470 return -EINVAL;
0471 }
0472
0473 if (ns->p2p_dev) {
0474 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
0475 if (ret < 0)
0476 return -EINVAL;
0477 } else {
/*
 * Right now we just check that p2pmem is available for this namespace
 * so we can report an error to the user right away if it is not.  The
 * actual device to use is chosen per controller once the port's device
 * is known (see nvmet_p2pmem_ns_add_p2p()).
 */
0485 p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
0486 if (!p2p_dev) {
0487 pr_err("no peer-to-peer memory is available for %s\n",
0488 ns->device_path);
0489 return -EINVAL;
0490 }
0491
0492 pci_dev_put(p2p_dev);
0493 }
0494
0495 return 0;
0496 }
0497
/*
 * Note: ctrl->subsys->lock should be held when calling this function.
 */
0501 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
0502 struct nvmet_ns *ns)
0503 {
0504 struct device *clients[2];
0505 struct pci_dev *p2p_dev;
0506 int ret;
0507
0508 if (!ctrl->p2p_client || !ns->use_p2pmem)
0509 return;
0510
0511 if (ns->p2p_dev) {
0512 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
0513 if (ret < 0)
0514 return;
0515
0516 p2p_dev = pci_dev_get(ns->p2p_dev);
0517 } else {
0518 clients[0] = ctrl->p2p_client;
0519 clients[1] = nvmet_ns_dev(ns);
0520
0521 p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
0522 if (!p2p_dev) {
0523 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
0524 dev_name(ctrl->p2p_client), ns->device_path);
0525 return;
0526 }
0527 }
0528
0529 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
0530 if (ret < 0)
0531 pci_dev_put(p2p_dev);
0532
0533 pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
0534 ns->nsid);
0535 }
0536
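/* Re-read the size of the backing device or file; returns true if it changed. */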
0537 bool nvmet_ns_revalidate(struct nvmet_ns *ns)
0538 {
0539 loff_t oldsize = ns->size;
0540
0541 if (ns->bdev)
0542 nvmet_bdev_ns_revalidate(ns);
0543 else
0544 nvmet_file_ns_revalidate(ns);
0545
0546 return oldsize != ns->size;
0547 }
0548
0549 int nvmet_ns_enable(struct nvmet_ns *ns)
0550 {
0551 struct nvmet_subsys *subsys = ns->subsys;
0552 struct nvmet_ctrl *ctrl;
0553 int ret;
0554
0555 mutex_lock(&subsys->lock);
0556 ret = 0;
0557
0558 if (nvmet_is_passthru_subsys(subsys)) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
0560 goto out_unlock;
0561 }
0562
0563 if (ns->enabled)
0564 goto out_unlock;
0565
0566 ret = -EMFILE;
0567 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
0568 goto out_unlock;
0569
0570 ret = nvmet_bdev_ns_enable(ns);
0571 if (ret == -ENOTBLK)
0572 ret = nvmet_file_ns_enable(ns);
0573 if (ret)
0574 goto out_unlock;
0575
0576 ret = nvmet_p2pmem_ns_enable(ns);
0577 if (ret)
0578 goto out_dev_disable;
0579
0580 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
0581 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
0582
0583 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0584 0, GFP_KERNEL);
0585 if (ret)
0586 goto out_dev_put;
0587
0588 if (ns->nsid > subsys->max_nsid)
0589 subsys->max_nsid = ns->nsid;
0590
0591 ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
0592 if (ret)
0593 goto out_restore_subsys_maxnsid;
0594
0595 subsys->nr_namespaces++;
0596
0597 nvmet_ns_changed(subsys, ns->nsid);
0598 ns->enabled = true;
0599 ret = 0;
0600 out_unlock:
0601 mutex_unlock(&subsys->lock);
0602 return ret;
0603
0604 out_restore_subsys_maxnsid:
0605 subsys->max_nsid = nvmet_max_nsid(subsys);
0606 percpu_ref_exit(&ns->ref);
0607 out_dev_put:
0608 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
0609 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
0610 out_dev_disable:
0611 nvmet_ns_dev_disable(ns);
0612 goto out_unlock;
0613 }
0614
0615 void nvmet_ns_disable(struct nvmet_ns *ns)
0616 {
0617 struct nvmet_subsys *subsys = ns->subsys;
0618 struct nvmet_ctrl *ctrl;
0619
0620 mutex_lock(&subsys->lock);
0621 if (!ns->enabled)
0622 goto out_unlock;
0623
0624 ns->enabled = false;
0625 xa_erase(&ns->subsys->namespaces, ns->nsid);
0626 if (ns->nsid == subsys->max_nsid)
0627 subsys->max_nsid = nvmet_max_nsid(subsys);
0628
0629 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
0630 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
0631
0632 mutex_unlock(&subsys->lock);
0633
/*
 * Now that we removed the namespace from the lookup list, we can kill the
 * percpu ref and wait for any remaining references to be dropped, as well
 * as an RCU grace period for anyone only using the namespace under
 * rcu_read_lock().  We can't simply use call_rcu() here, as we need to
 * ensure the namespace has been fully destroyed before the module can be
 * unloaded.
 */
0642 percpu_ref_kill(&ns->ref);
0643 synchronize_rcu();
0644 wait_for_completion(&ns->disable_done);
0645 percpu_ref_exit(&ns->ref);
0646
0647 mutex_lock(&subsys->lock);
0648
0649 subsys->nr_namespaces--;
0650 nvmet_ns_changed(subsys, ns->nsid);
0651 nvmet_ns_dev_disable(ns);
0652 out_unlock:
0653 mutex_unlock(&subsys->lock);
0654 }
0655
0656 void nvmet_ns_free(struct nvmet_ns *ns)
0657 {
0658 nvmet_ns_disable(ns);
0659
0660 down_write(&nvmet_ana_sem);
0661 nvmet_ana_group_enabled[ns->anagrpid]--;
0662 up_write(&nvmet_ana_sem);
0663
0664 kfree(ns->device_path);
0665 kfree(ns);
0666 }
0667
0668 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
0669 {
0670 struct nvmet_ns *ns;
0671
0672 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
0673 if (!ns)
0674 return NULL;
0675
0676 init_completion(&ns->disable_done);
0677
0678 ns->nsid = nsid;
0679 ns->subsys = subsys;
0680
0681 down_write(&nvmet_ana_sem);
0682 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
0683 nvmet_ana_group_enabled[ns->anagrpid]++;
0684 up_write(&nvmet_ana_sem);
0685
0686 uuid_gen(&ns->uuid);
0687 ns->buffered_io = false;
0688 ns->csi = NVME_CSI_NVM;
0689
0690 return ns;
0691 }
0692
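/* Advance the submission queue head pointer (lockless, using cmpxchg). */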
0693 static void nvmet_update_sq_head(struct nvmet_req *req)
0694 {
0695 if (req->sq->size) {
0696 u32 old_sqhd, new_sqhd;
0697
0698 do {
0699 old_sqhd = req->sq->sqhd;
0700 new_sqhd = (old_sqhd + 1) % req->sq->size;
0701 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
0702 old_sqhd);
0703 }
0704 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
0705 }
0706
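/*
 * Record a command failure in the completion status and, if a controller is
 * associated with the queue, append an entry to its Error Information log.
 */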
0707 static void nvmet_set_error(struct nvmet_req *req, u16 status)
0708 {
0709 struct nvmet_ctrl *ctrl = req->sq->ctrl;
0710 struct nvme_error_slot *new_error_slot;
0711 unsigned long flags;
0712
0713 req->cqe->status = cpu_to_le16(status << 1);
0714
0715 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
0716 return;
0717
0718 spin_lock_irqsave(&ctrl->error_lock, flags);
0719 ctrl->err_counter++;
0720 new_error_slot =
0721 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
0722
0723 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
0724 new_error_slot->sqid = cpu_to_le16(req->sq->qid);
0725 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
0726 new_error_slot->status_field = cpu_to_le16(status << 1);
0727 new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
0728 new_error_slot->lba = cpu_to_le64(req->error_slba);
0729 new_error_slot->nsid = req->cmd->common.nsid;
0730 spin_unlock_irqrestore(&ctrl->error_lock, flags);
0731
/* set the More (M) bit to indicate that more error information is available */
0733 req->cqe->status |= cpu_to_le16(1 << 14);
0734 }
0735
0736 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
0737 {
0738 struct nvmet_ns *ns = req->ns;
0739
0740 if (!req->sq->sqhd_disabled)
0741 nvmet_update_sq_head(req);
0742 req->cqe->sq_id = cpu_to_le16(req->sq->qid);
0743 req->cqe->command_id = req->cmd->common.command_id;
0744
0745 if (unlikely(status))
0746 nvmet_set_error(req, status);
0747
0748 trace_nvmet_req_complete(req);
0749
0750 req->ops->queue_response(req);
0751 if (ns)
0752 nvmet_put_namespace(ns);
0753 }
0754
0755 void nvmet_req_complete(struct nvmet_req *req, u16 status)
0756 {
0757 __nvmet_req_complete(req, status);
0758 percpu_ref_put(&req->sq->ref);
0759 }
0760 EXPORT_SYMBOL_GPL(nvmet_req_complete);
0761
0762 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
0763 u16 qid, u16 size)
0764 {
0765 cq->qid = qid;
0766 cq->size = size;
0767 }
0768
0769 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
0770 u16 qid, u16 size)
0771 {
0772 sq->sqhd = 0;
0773 sq->qid = qid;
0774 sq->size = size;
0775
0776 ctrl->sqs[qid] = sq;
0777 }
0778
0779 static void nvmet_confirm_sq(struct percpu_ref *ref)
0780 {
0781 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
0782
0783 complete(&sq->confirm_done);
0784 }
0785
0786 void nvmet_sq_destroy(struct nvmet_sq *sq)
0787 {
0788 struct nvmet_ctrl *ctrl = sq->ctrl;
0789
/*
 * If this is the admin queue, complete all AERs so that our
 * queue doesn't have outstanding requests on it.
 */
0794 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
0795 nvmet_async_events_failall(ctrl);
0796 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
0797 wait_for_completion(&sq->confirm_done);
0798 wait_for_completion(&sq->free_done);
0799 percpu_ref_exit(&sq->ref);
0800 nvmet_auth_sq_free(sq);
0801
0802 if (ctrl) {
/*
 * The teardown flow may take some time, and the host may not
 * send us keep-alive during this period, hence reset the
 * traffic based keep-alive timer so we don't trigger a
 * controller teardown as a result of a keep-alive expiration.
 */
0809 ctrl->reset_tbkas = true;
0810 sq->ctrl->sqs[sq->qid] = NULL;
0811 nvmet_ctrl_put(ctrl);
0812 sq->ctrl = NULL;
0813 }
0814 }
0815 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
0816
0817 static void nvmet_sq_free(struct percpu_ref *ref)
0818 {
0819 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
0820
0821 complete(&sq->free_done);
0822 }
0823
0824 int nvmet_sq_init(struct nvmet_sq *sq)
0825 {
0826 int ret;
0827
0828 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
0829 if (ret) {
0830 pr_err("percpu_ref init failed!\n");
0831 return ret;
0832 }
0833 init_completion(&sq->free_done);
0834 init_completion(&sq->confirm_done);
0835
0836 return 0;
0837 }
0838 EXPORT_SYMBOL_GPL(nvmet_sq_init);
0839
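/* Map the ANA state of the namespace's ANA group on this port to a status code. */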
0840 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
0841 struct nvmet_ns *ns)
0842 {
0843 enum nvme_ana_state state = port->ana_state[ns->anagrpid];
0844
0845 if (unlikely(state == NVME_ANA_INACCESSIBLE))
0846 return NVME_SC_ANA_INACCESSIBLE;
0847 if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
0848 return NVME_SC_ANA_PERSISTENT_LOSS;
0849 if (unlikely(state == NVME_ANA_CHANGE))
0850 return NVME_SC_ANA_TRANSITION;
0851 return 0;
0852 }
0853
0854 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
0855 {
0856 if (unlikely(req->ns->readonly)) {
0857 switch (req->cmd->common.opcode) {
0858 case nvme_cmd_read:
0859 case nvme_cmd_flush:
0860 break;
0861 default:
0862 return NVME_SC_NS_WRITE_PROTECTED;
0863 }
0864 }
0865
0866 return 0;
0867 }
0868
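/*
 * Validate and dispatch an I/O command: fabrics commands are handled first,
 * then authentication and controller state are checked, the namespace is
 * resolved, ANA state and write protection are verified, and the command is
 * handed to the file, block or zoned-block backend parser.
 */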
0869 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
0870 {
0871 struct nvme_command *cmd = req->cmd;
0872 u16 ret;
0873
0874 if (nvme_is_fabrics(cmd))
0875 return nvmet_parse_fabrics_io_cmd(req);
0876
0877 if (unlikely(!nvmet_check_auth_status(req)))
0878 return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
0879
0880 ret = nvmet_check_ctrl_status(req);
0881 if (unlikely(ret))
0882 return ret;
0883
0884 if (nvmet_is_passthru_req(req))
0885 return nvmet_parse_passthru_io_cmd(req);
0886
0887 ret = nvmet_req_find_ns(req);
0888 if (unlikely(ret))
0889 return ret;
0890
0891 ret = nvmet_check_ana_state(req->port, req->ns);
0892 if (unlikely(ret)) {
0893 req->error_loc = offsetof(struct nvme_common_command, nsid);
0894 return ret;
0895 }
0896 ret = nvmet_io_cmd_check_access(req);
0897 if (unlikely(ret)) {
0898 req->error_loc = offsetof(struct nvme_common_command, nsid);
0899 return ret;
0900 }
0901
0902 switch (req->ns->csi) {
0903 case NVME_CSI_NVM:
0904 if (req->ns->file)
0905 return nvmet_file_parse_io_cmd(req);
0906 return nvmet_bdev_parse_io_cmd(req);
0907 case NVME_CSI_ZNS:
0908 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
0909 return nvmet_bdev_zns_parse_io_cmd(req);
0910 return NVME_SC_INVALID_IO_CMD_SET;
0911 default:
0912 return NVME_SC_INVALID_IO_CMD_SET;
0913 }
0914 }
0915
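/*
 * Initialize a request received from the transport and parse its command.
 * Returns false (and completes the request with an error) if the command
 * cannot be executed; on success a submission queue reference is held until
 * nvmet_req_complete() or nvmet_req_uninit().
 */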
0916 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
0917 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
0918 {
0919 u8 flags = req->cmd->common.flags;
0920 u16 status;
0921
0922 req->cq = cq;
0923 req->sq = sq;
0924 req->ops = ops;
0925 req->sg = NULL;
0926 req->metadata_sg = NULL;
0927 req->sg_cnt = 0;
0928 req->metadata_sg_cnt = 0;
0929 req->transfer_len = 0;
0930 req->metadata_len = 0;
0931 req->cqe->status = 0;
0932 req->cqe->sq_head = 0;
0933 req->ns = NULL;
0934 req->error_loc = NVMET_NO_ERROR_LOC;
0935 req->error_slba = 0;
0936
/* no support for fused commands yet */
0938 if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
0939 req->error_loc = offsetof(struct nvme_common_command, flags);
0940 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0941 goto fail;
0942 }
0943
/*
 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR)
 * that contains the address of a single contiguous physical buffer that
 * is byte aligned, i.e. the SGL flags must be exactly NVME_CMD_SGL_METABUF.
 */
0949 if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
0950 req->error_loc = offsetof(struct nvme_common_command, flags);
0951 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0952 goto fail;
0953 }
0954
0955 if (unlikely(!req->sq->ctrl))
/* will return an error for any non-connect command: */
0957 status = nvmet_parse_connect_cmd(req);
0958 else if (likely(req->sq->qid != 0))
0959 status = nvmet_parse_io_cmd(req);
0960 else
0961 status = nvmet_parse_admin_cmd(req);
0962
0963 if (status)
0964 goto fail;
0965
0966 trace_nvmet_req_init(req, req->cmd);
0967
0968 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
0969 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
0970 goto fail;
0971 }
0972
0973 if (sq->ctrl)
0974 sq->ctrl->reset_tbkas = true;
0975
0976 return true;
0977
0978 fail:
0979 __nvmet_req_complete(req, status);
0980 return false;
0981 }
0982 EXPORT_SYMBOL_GPL(nvmet_req_init);
0983
0984 void nvmet_req_uninit(struct nvmet_req *req)
0985 {
0986 percpu_ref_put(&req->sq->ref);
0987 if (req->ns)
0988 nvmet_put_namespace(req->ns);
0989 }
0990 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
0991
0992 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
0993 {
0994 if (unlikely(len != req->transfer_len)) {
0995 req->error_loc = offsetof(struct nvme_common_command, dptr);
0996 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
0997 return false;
0998 }
0999
1000 return true;
1001 }
1002 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
1003
1004 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
1005 {
1006 if (unlikely(data_len > req->transfer_len)) {
1007 req->error_loc = offsetof(struct nvme_common_command, dptr);
1008 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
1009 return false;
1010 }
1011
1012 return true;
1013 }
1014
1015 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1016 {
1017 return req->transfer_len - req->metadata_len;
1018 }
1019
1020 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1021 struct nvmet_req *req)
1022 {
1023 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1024 nvmet_data_transfer_len(req));
1025 if (!req->sg)
1026 goto out_err;
1027
1028 if (req->metadata_len) {
1029 req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1030 &req->metadata_sg_cnt, req->metadata_len);
1031 if (!req->metadata_sg)
1032 goto out_free_sg;
1033 }
1034
1035 req->p2p_dev = p2p_dev;
1036
1037 return 0;
1038 out_free_sg:
1039 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1040 out_err:
1041 return -ENOMEM;
1042 }
1043
1044 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1045 {
1046 if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1047 !req->sq->ctrl || !req->sq->qid || !req->ns)
1048 return NULL;
1049 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1050 }
1051
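/*
 * Allocate the data (and optional metadata) scatterlists for a request,
 * preferring peer-to-peer memory when a P2P device was set up for this
 * controller/namespace pair.
 */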
1052 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1053 {
1054 struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1055
1056 if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1057 return 0;
1058
1059 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1060 &req->sg_cnt);
1061 if (unlikely(!req->sg))
1062 goto out;
1063
1064 if (req->metadata_len) {
1065 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1066 &req->metadata_sg_cnt);
1067 if (unlikely(!req->metadata_sg))
1068 goto out_free;
1069 }
1070
1071 return 0;
1072 out_free:
1073 sgl_free(req->sg);
1074 out:
1075 return -ENOMEM;
1076 }
1077 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1078
1079 void nvmet_req_free_sgls(struct nvmet_req *req)
1080 {
1081 if (req->p2p_dev) {
1082 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1083 if (req->metadata_sg)
1084 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1085 req->p2p_dev = NULL;
1086 } else {
1087 sgl_free(req->sg);
1088 if (req->metadata_sg)
1089 sgl_free(req->metadata_sg);
1090 }
1091
1092 req->sg = NULL;
1093 req->metadata_sg = NULL;
1094 req->sg_cnt = 0;
1095 req->metadata_sg_cnt = 0;
1096 }
1097 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1098
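/* Helpers to extract individual fields from the Controller Configuration (CC) register value. */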
1099 static inline bool nvmet_cc_en(u32 cc)
1100 {
1101 return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1102 }
1103
1104 static inline u8 nvmet_cc_css(u32 cc)
1105 {
1106 return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1107 }
1108
1109 static inline u8 nvmet_cc_mps(u32 cc)
1110 {
1111 return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1112 }
1113
1114 static inline u8 nvmet_cc_ams(u32 cc)
1115 {
1116 return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1117 }
1118
1119 static inline u8 nvmet_cc_shn(u32 cc)
1120 {
1121 return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1122 }
1123
1124 static inline u8 nvmet_cc_iosqes(u32 cc)
1125 {
1126 return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1127 }
1128
1129 static inline u8 nvmet_cc_iocqes(u32 cc)
1130 {
1131 return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1132 }
1133
1134 static inline bool nvmet_css_supported(u8 cc_css)
1135 {
1136 switch (cc_css << NVME_CC_CSS_SHIFT) {
1137 case NVME_CC_CSS_NVM:
1138 case NVME_CC_CSS_CSI:
1139 return true;
1140 default:
1141 return false;
1142 }
1143 }
1144
1145 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1146 {
1147 lockdep_assert_held(&ctrl->lock);
1148
/*
 * Only I/O controllers need to verify IOSQES/IOCQES; discovery
 * controllers have no I/O queues, so hosts may legitimately leave
 * these fields zeroed.
 */
1155 if (!nvmet_is_disc_subsys(ctrl->subsys) &&
1156 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1157 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1158 ctrl->csts = NVME_CSTS_CFS;
1159 return;
1160 }
1161
1162 if (nvmet_cc_mps(ctrl->cc) != 0 ||
1163 nvmet_cc_ams(ctrl->cc) != 0 ||
1164 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1165 ctrl->csts = NVME_CSTS_CFS;
1166 return;
1167 }
1168
1169 ctrl->csts = NVME_CSTS_RDY;
1170
/*
 * Controllers that are not yet enabled should not really enforce the
 * keep alive timeout, but we still want to track a timeout and clean up
 * in case a host died before it enabled the controller.  Hence, simply
 * reset the keep alive timer when the controller is enabled.
 */
1177 if (ctrl->kato)
mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1179 }
1180
1181 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1182 {
1183 lockdep_assert_held(&ctrl->lock);
1184
/* XXX: tear down queues? */
1186 ctrl->csts &= ~NVME_CSTS_RDY;
1187 ctrl->cc = 0;
1188 }
1189
1190 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1191 {
1192 u32 old;
1193
1194 mutex_lock(&ctrl->lock);
1195 old = ctrl->cc;
1196 ctrl->cc = new;
1197
1198 if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1199 nvmet_start_ctrl(ctrl);
1200 if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1201 nvmet_clear_ctrl(ctrl);
1202 if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1203 nvmet_clear_ctrl(ctrl);
1204 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1205 }
1206 if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1207 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1208 mutex_unlock(&ctrl->lock);
1209 }
1210
1211 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1212 {
/* command sets supported: NVMe command set: */
ctrl->cap = (1ULL << 37);
/* CSI support: one or more I/O command sets may be supported */
ctrl->cap |= (1ULL << 43);
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
1220 if (ctrl->ops->get_max_queue_size)
1221 ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
1222 else
1223 ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1224
1225 if (nvmet_is_passthru_subsys(ctrl->subsys))
1226 nvmet_passthrough_override_cap(ctrl);
1227 }
1228
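/*
 * Find an existing controller by controller ID for an I/O queue connect and
 * take a reference on it; the host NQN must match the one that created the
 * controller.
 */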
1229 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1230 const char *hostnqn, u16 cntlid,
1231 struct nvmet_req *req)
1232 {
1233 struct nvmet_ctrl *ctrl = NULL;
1234 struct nvmet_subsys *subsys;
1235
1236 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1237 if (!subsys) {
1238 pr_warn("connect request for invalid subsystem %s!\n",
1239 subsysnqn);
1240 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1241 goto out;
1242 }
1243
1244 mutex_lock(&subsys->lock);
1245 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1246 if (ctrl->cntlid == cntlid) {
1247 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1248 pr_warn("hostnqn mismatch.\n");
1249 continue;
1250 }
1251 if (!kref_get_unless_zero(&ctrl->ref))
1252 continue;
1253
/* ctrl found */
1255 goto found;
1256 }
1257 }
1258
1259 ctrl = NULL;
1260 pr_warn("could not find controller %d for subsys %s / host %s\n",
1261 cntlid, subsysnqn, hostnqn);
1262 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1263
1264 found:
1265 mutex_unlock(&subsys->lock);
1266 nvmet_subsys_put(subsys);
1267 out:
1268 return ctrl;
1269 }
1270
1271 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1272 {
1273 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1274 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1275 req->cmd->common.opcode, req->sq->qid);
1276 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1277 }
1278
1279 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1280 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1281 req->cmd->common.opcode, req->sq->qid);
1282 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1283 }
1284
1285 if (unlikely(!nvmet_check_auth_status(req))) {
1286 pr_warn("qid %d not authenticated\n", req->sq->qid);
1287 return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
1288 }
1289 return 0;
1290 }
1291
1292 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1293 {
1294 struct nvmet_host_link *p;
1295
1296 lockdep_assert_held(&nvmet_config_sem);
1297
1298 if (subsys->allow_any_host)
1299 return true;
1300
1301 if (nvmet_is_disc_subsys(subsys))
1302 return true;
1303
1304 list_for_each_entry(p, &subsys->hosts, entry) {
1305 if (!strcmp(nvmet_host_name(p->host), hostnqn))
1306 return true;
1307 }
1308
1309 return false;
1310 }
1311
/*
 * Note: ctrl->subsys->lock should be held when calling this function.
 */
1315 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1316 struct nvmet_req *req)
1317 {
1318 struct nvmet_ns *ns;
1319 unsigned long idx;
1320
1321 if (!req->p2p_client)
1322 return;
1323
1324 ctrl->p2p_client = get_device(req->p2p_client);
1325
1326 xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1327 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1328 }
1329
/*
 * Note: ctrl->subsys->lock should be held when calling this function.
 */
1333 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1334 {
1335 struct radix_tree_iter iter;
1336 void __rcu **slot;
1337
1338 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1339 pci_dev_put(radix_tree_deref_slot(slot));
1340
1341 put_device(ctrl->p2p_client);
1342 }
1343
1344 static void nvmet_fatal_error_handler(struct work_struct *work)
1345 {
1346 struct nvmet_ctrl *ctrl =
1347 container_of(work, struct nvmet_ctrl, fatal_err_work);
1348
1349 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1350 ctrl->ops->delete_ctrl(ctrl);
1351 }
1352
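/*
 * Allocate and initialize a new controller for the given host/subsystem pair
 * in response to a Fabrics Connect command.  Returns 0 and stores the new
 * controller in *ctrlp on success, or an NVMe status code on failure.
 */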
1353 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1354 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1355 {
1356 struct nvmet_subsys *subsys;
1357 struct nvmet_ctrl *ctrl;
1358 int ret;
1359 u16 status;
1360
1361 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1362 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1363 if (!subsys) {
1364 pr_warn("connect request for invalid subsystem %s!\n",
1365 subsysnqn);
1366 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1367 req->error_loc = offsetof(struct nvme_common_command, dptr);
1368 goto out;
1369 }
1370
1371 down_read(&nvmet_config_sem);
1372 if (!nvmet_host_allowed(subsys, hostnqn)) {
1373 pr_info("connect by host %s for subsystem %s not allowed\n",
1374 hostnqn, subsysnqn);
1375 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1376 up_read(&nvmet_config_sem);
1377 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1378 req->error_loc = offsetof(struct nvme_common_command, dptr);
1379 goto out_put_subsystem;
1380 }
1381 up_read(&nvmet_config_sem);
1382
1383 status = NVME_SC_INTERNAL;
1384 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1385 if (!ctrl)
1386 goto out_put_subsystem;
1387 mutex_init(&ctrl->lock);
1388
1389 ctrl->port = req->port;
1390 ctrl->ops = req->ops;
1391
1392 #ifdef CONFIG_NVME_TARGET_PASSTHRU
/* loop targets clear the passed-through namespace IDs by default to avoid duplicate IDs on the host */
1394 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
1395 subsys->clear_ids = 1;
1396 #endif
1397
1398 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1399 INIT_LIST_HEAD(&ctrl->async_events);
1400 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1401 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1402 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1403
1404 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1405 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1406
1407 kref_init(&ctrl->ref);
1408 ctrl->subsys = subsys;
1409 nvmet_init_cap(ctrl);
1410 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1411
1412 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1413 sizeof(__le32), GFP_KERNEL);
1414 if (!ctrl->changed_ns_list)
1415 goto out_free_ctrl;
1416
1417 ctrl->sqs = kcalloc(subsys->max_qid + 1,
1418 sizeof(struct nvmet_sq *),
1419 GFP_KERNEL);
1420 if (!ctrl->sqs)
1421 goto out_free_changed_ns_list;
1422
1423 if (subsys->cntlid_min > subsys->cntlid_max)
1424 goto out_free_sqs;
1425
1426 ret = ida_alloc_range(&cntlid_ida,
1427 subsys->cntlid_min, subsys->cntlid_max,
1428 GFP_KERNEL);
1429 if (ret < 0) {
1430 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1431 goto out_free_sqs;
1432 }
1433 ctrl->cntlid = ret;
1434
/*
 * Discovery controllers may use some arbitrary high value
 * in order to clean up stale discovery sessions.
 */
1439 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
1440 kato = NVMET_DISC_KATO_MS;
1441
/* keep-alive timeout in seconds */
1443 ctrl->kato = DIV_ROUND_UP(kato, 1000);
1444
1445 ctrl->err_counter = 0;
1446 spin_lock_init(&ctrl->error_lock);
1447
1448 nvmet_start_keep_alive_timer(ctrl);
1449
1450 mutex_lock(&subsys->lock);
1451 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1452 nvmet_setup_p2p_ns_map(ctrl, req);
1453 mutex_unlock(&subsys->lock);
1454
1455 *ctrlp = ctrl;
1456 return 0;
1457
1458 out_free_sqs:
1459 kfree(ctrl->sqs);
1460 out_free_changed_ns_list:
1461 kfree(ctrl->changed_ns_list);
1462 out_free_ctrl:
1463 kfree(ctrl);
1464 out_put_subsystem:
1465 nvmet_subsys_put(subsys);
1466 out:
1467 return status;
1468 }
1469
1470 static void nvmet_ctrl_free(struct kref *ref)
1471 {
1472 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1473 struct nvmet_subsys *subsys = ctrl->subsys;
1474
1475 mutex_lock(&subsys->lock);
1476 nvmet_release_p2p_ns_map(ctrl);
1477 list_del(&ctrl->subsys_entry);
1478 mutex_unlock(&subsys->lock);
1479
1480 nvmet_stop_keep_alive_timer(ctrl);
1481
1482 flush_work(&ctrl->async_event_work);
1483 cancel_work_sync(&ctrl->fatal_err_work);
1484
1485 nvmet_destroy_auth(ctrl);
1486
1487 ida_free(&cntlid_ida, ctrl->cntlid);
1488
1489 nvmet_async_events_free(ctrl);
1490 kfree(ctrl->sqs);
1491 kfree(ctrl->changed_ns_list);
1492 kfree(ctrl);
1493
1494 nvmet_subsys_put(subsys);
1495 }
1496
1497 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1498 {
1499 kref_put(&ctrl->ref, nvmet_ctrl_free);
1500 }
1501
1502 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1503 {
1504 mutex_lock(&ctrl->lock);
1505 if (!(ctrl->csts & NVME_CSTS_CFS)) {
1506 ctrl->csts |= NVME_CSTS_CFS;
1507 queue_work(nvmet_wq, &ctrl->fatal_err_work);
1508 }
1509 mutex_unlock(&ctrl->lock);
1510 }
1511 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1512
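/*
 * Resolve a subsystem NQN on the given port (the discovery subsystem NQN is
 * always accepted) and take a reference on the subsystem.
 */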
1513 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1514 const char *subsysnqn)
1515 {
1516 struct nvmet_subsys_link *p;
1517
1518 if (!port)
1519 return NULL;
1520
1521 if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1522 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1523 return NULL;
1524 return nvmet_disc_subsys;
1525 }
1526
1527 down_read(&nvmet_config_sem);
1528 list_for_each_entry(p, &port->subsystems, entry) {
1529 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1530 NVMF_NQN_SIZE)) {
1531 if (!kref_get_unless_zero(&p->subsys->ref))
1532 break;
1533 up_read(&nvmet_config_sem);
1534 return p->subsys;
1535 }
1536 }
1537 up_read(&nvmet_config_sem);
1538 return NULL;
1539 }
1540
1541 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1542 enum nvme_subsys_type type)
1543 {
1544 struct nvmet_subsys *subsys;
1545 char serial[NVMET_SN_MAX_SIZE / 2];
1546 int ret;
1547
1548 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1549 if (!subsys)
1550 return ERR_PTR(-ENOMEM);
1551
1552 subsys->ver = NVMET_DEFAULT_VS;
1553
1554 get_random_bytes(&serial, sizeof(serial));
1555 bin2hex(subsys->serial, &serial, sizeof(serial));
1556
1557 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1558 if (!subsys->model_number) {
1559 ret = -ENOMEM;
1560 goto free_subsys;
1561 }
1562
1563 switch (type) {
1564 case NVME_NQN_NVME:
1565 subsys->max_qid = NVMET_NR_QUEUES;
1566 break;
1567 case NVME_NQN_DISC:
1568 case NVME_NQN_CURR:
1569 subsys->max_qid = 0;
1570 break;
1571 default:
1572 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1573 ret = -EINVAL;
1574 goto free_mn;
1575 }
1576 subsys->type = type;
1577 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1578 GFP_KERNEL);
1579 if (!subsys->subsysnqn) {
1580 ret = -ENOMEM;
1581 goto free_mn;
1582 }
1583 subsys->cntlid_min = NVME_CNTLID_MIN;
1584 subsys->cntlid_max = NVME_CNTLID_MAX;
1585 kref_init(&subsys->ref);
1586
1587 mutex_init(&subsys->lock);
1588 xa_init(&subsys->namespaces);
1589 INIT_LIST_HEAD(&subsys->ctrls);
1590 INIT_LIST_HEAD(&subsys->hosts);
1591
1592 return subsys;
1593
1594 free_mn:
1595 kfree(subsys->model_number);
1596 free_subsys:
1597 kfree(subsys);
1598 return ERR_PTR(ret);
1599 }
1600
1601 static void nvmet_subsys_free(struct kref *ref)
1602 {
1603 struct nvmet_subsys *subsys =
1604 container_of(ref, struct nvmet_subsys, ref);
1605
1606 WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1607
1608 xa_destroy(&subsys->namespaces);
1609 nvmet_passthru_subsys_free(subsys);
1610
1611 kfree(subsys->subsysnqn);
1612 kfree(subsys->model_number);
1613 kfree(subsys);
1614 }
1615
1616 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1617 {
1618 struct nvmet_ctrl *ctrl;
1619
1620 mutex_lock(&subsys->lock);
1621 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1622 ctrl->ops->delete_ctrl(ctrl);
1623 mutex_unlock(&subsys->lock);
1624 }
1625
1626 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1627 {
1628 kref_put(&subsys->ref, nvmet_subsys_free);
1629 }
1630
1631 static int __init nvmet_init(void)
1632 {
1633 int error;
1634
1635 nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1636
1637 zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1638 if (!zbd_wq)
1639 return -ENOMEM;
1640
1641 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1642 WQ_MEM_RECLAIM, 0);
1643 if (!buffered_io_wq) {
1644 error = -ENOMEM;
1645 goto out_free_zbd_work_queue;
1646 }
1647
1648 nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
1649 if (!nvmet_wq) {
1650 error = -ENOMEM;
1651 goto out_free_buffered_work_queue;
1652 }
1653
1654 error = nvmet_init_discovery();
1655 if (error)
1656 goto out_free_nvmet_work_queue;
1657
1658 error = nvmet_init_configfs();
1659 if (error)
1660 goto out_exit_discovery;
1661 return 0;
1662
1663 out_exit_discovery:
1664 nvmet_exit_discovery();
1665 out_free_nvmet_work_queue:
1666 destroy_workqueue(nvmet_wq);
1667 out_free_buffered_work_queue:
1668 destroy_workqueue(buffered_io_wq);
1669 out_free_zbd_work_queue:
1670 destroy_workqueue(zbd_wq);
1671 return error;
1672 }
1673
1674 static void __exit nvmet_exit(void)
1675 {
1676 nvmet_exit_configfs();
1677 nvmet_exit_discovery();
1678 ida_destroy(&cntlid_ida);
1679 destroy_workqueue(nvmet_wq);
1680 destroy_workqueue(buffered_io_wq);
1681 destroy_workqueue(zbd_wq);
1682
1683 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1684 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1685 }
1686
1687 module_init(nvmet_init);
1688 module_exit(nvmet_exit);
1689
1690 MODULE_LICENSE("GPL v2");