// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
    /*
     * Multiple command set support can only be declared if the underlying
     * controller actually supports it.
     */
    if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
        ctrl->cap &= ~(1ULL << 43);
}
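
/*
 * For reference: in the NVMe base spec CAP.CSS occupies bits 44:37, so the
 * bit cleared above (bit 43) is CSS bit 6, "one or more I/O Command Sets are
 * supported" (cf. NVME_CAP_CSS_CSI in include/linux/nvme.h). A roughly
 * equivalent form of the masking, assuming NVME_CAP_CSS_CSI == (1 << 6),
 * would be:
 *
 *     ctrl->cap &= ~((u64)NVME_CAP_CSS_CSI << 37);
 *
 * The open-coded shift is what the driver uses; the variant above is only
 * an illustration.
 */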

static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
    struct nvmet_ctrl *ctrl = req->sq->ctrl;
    u16 status = NVME_SC_SUCCESS;
    int pos, len;
    bool csi_seen = false;
    void *data;
    u8 csi;

    if (!ctrl->subsys->clear_ids)
        return status;

    data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
    if (!data)
        return NVME_SC_INTERNAL;

    status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
    if (status)
        goto out_free;

    for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
        struct nvme_ns_id_desc *cur = data + pos;

        if (cur->nidl == 0)
            break;
        if (cur->nidt == NVME_NIDT_CSI) {
            memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
            csi_seen = true;
            break;
        }
        len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
    }

    memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
    if (csi_seen) {
        struct nvme_ns_id_desc *cur = data;

        cur->nidt = NVME_NIDT_CSI;
        cur->nidl = NVME_NIDT_CSI_LEN;
        memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
    }
    status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
    kfree(data);
    return status;
}
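
/*
 * The rewrite above leaves at most a single Namespace Identification
 * Descriptor in the buffer. With the 4-byte descriptor header defined by the
 * spec (NIDT, NIDL, two reserved bytes) followed by the payload, a zoned
 * namespace would end up with roughly this layout, assuming NVME_NIDT_CSI is
 * type 0x04 with a one-byte payload and NVME_CSI_ZNS is 0x02:
 *
 *     byte 0: 0x04  (NIDT = Command Set Identifier)
 *     byte 1: 0x01  (NIDL = 1)
 *     byte 2: 0x00  (reserved)
 *     byte 3: 0x00  (reserved)
 *     byte 4: 0x02  (CSI value, here ZNS)
 *
 * Every other descriptor (NGUID, EUI-64, UUID) is zeroed so that a passthru
 * target looped back to the same host (the clear_ids case) does not expose
 * duplicate namespace identifiers.
 */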

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
    struct nvmet_ctrl *ctrl = req->sq->ctrl;
    struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
    u16 status = NVME_SC_SUCCESS;
    struct nvme_id_ctrl *id;
    unsigned int max_hw_sectors;
    int page_shift;

    id = kzalloc(sizeof(*id), GFP_KERNEL);
    if (!id)
        return NVME_SC_INTERNAL;

    status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
    if (status)
        goto out_free;

    id->cntlid = cpu_to_le16(ctrl->cntlid);
    id->ver = cpu_to_le32(ctrl->subsys->ver);

    /*
     * The passthru NVMe driver may have a limit on the number of segments
     * which depends on the host's memory fragmentation. To solve this,
     * limit mdts to a number of pages equal to the maximum number of
     * segments.
     */
    max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
                      pctrl->max_hw_sectors);

    /*
     * nvmet_passthru_map_sg is limited to using a single bio, so also
     * limit mdts based on BIO_MAX_VECS.
     */
    max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
                      max_hw_sectors);

    page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

    id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

    id->acl = 3;
    /*
     * We export the aerl limit of the fabrics controller; update this when
     * passthru based aerl support is added.
     */
    id->aerl = NVMET_ASYNC_EVENTS - 1;

    /* emulate kas as most PCIe ctrls don't support keep alive */
    id->kas = cpu_to_le16(NVMET_KAS);

    /* don't support host memory buffer */
    id->hmpre = 0;
    id->hmmin = 0;

    id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
    id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
    id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

    /* don't support fused commands */
    id->fuses = 0;

    id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
    if (ctrl->ops->flags & NVMF_KEYED_SGLS)
        id->sgls |= cpu_to_le32(1 << 2);
    if (req->port->inline_data_size)
        id->sgls |= cpu_to_le32(1 << 20);

    /*
     * When a passthru controller is set up using the nvme-loop transport it
     * will export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail
     * in the nvme/host/core.c nvme_init_subsystem()->nvme_active_ctrl()
     * code path with a duplicate ctrl subsysnqn. In order to prevent that we
     * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
     */
    memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

    /* use fabric id-ctrl values */
    id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                req->port->inline_data_size) / 16);
    id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

    id->msdbd = ctrl->ops->msdbd;

    /* Support multipath connections with fabrics */
    id->cmic |= 1 << 1;

    /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
    id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

    status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
    kfree(id);
    return status;
}
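
/*
 * Worked example for the mdts computation above: mdts is expressed as a
 * power of two in units of the minimum memory page size (CAP.MPSMIN). With
 * MPSMIN = 0 (4 KiB pages, page_shift = 12) and an effective max_hw_sectors
 * of 2048 512-byte sectors (1 MiB), mdts = ilog2(2048) + 9 - 12 = 8, i.e.
 * 2^8 * 4 KiB = 1 MiB per command. The sqes/cqes clamps cap the advertised
 * entry sizes at the fabrics values: 0x66 means min and max SQ entry size of
 * 2^6 = 64 bytes, 0x44 means min and max CQ entry size of 2^4 = 16 bytes.
 * The numbers are illustrative only; the actual values depend on the
 * passthru controller's limits.
 */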

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
    u16 status = NVME_SC_SUCCESS;
    struct nvme_id_ns *id;
    int i;

    id = kzalloc(sizeof(*id), GFP_KERNEL);
    if (!id)
        return NVME_SC_INTERNAL;

    status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
    if (status)
        goto out_free;

    for (i = 0; i < (id->nlbaf + 1); i++)
        if (id->lbaf[i].ms)
            memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

    id->flbas = id->flbas & ~(1 << 4);

    /*
     * Presently the NVMe-oF target code does not support sending
     * metadata, so we must disable it here. This should be updated
     * once the target starts supporting metadata.
     */
    id->mc = 0;

    if (req->sq->ctrl->subsys->clear_ids) {
        memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
        memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
    }

    status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
    kfree(id);
    return status;
}
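
/*
 * Note on the LBA format handling above: any format with a non-zero
 * metadata size (lbaf[i].ms) is zeroed out, and bit 4 of FLBAS, which per
 * the spec selects "metadata transferred at the end of the data LBA"
 * (extended data LBAs), is cleared to match. Together with mc = 0 this
 * hides all metadata capability from the fabrics host until the target
 * gains metadata support.
 */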

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
    struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
    struct request *rq = req->p.rq;
    int status;

    status = nvme_execute_passthru_rq(rq);

    if (status == NVME_SC_SUCCESS &&
        req->cmd->common.opcode == nvme_admin_identify) {
        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_CTRL:
            nvmet_passthru_override_id_ctrl(req);
            break;
        case NVME_ID_CNS_NS:
            nvmet_passthru_override_id_ns(req);
            break;
        case NVME_ID_CNS_NS_DESC_LIST:
            nvmet_passthru_override_id_descs(req);
            break;
        }
    } else if (status < 0)
        status = NVME_SC_INTERNAL;

    req->cqe->result = nvme_req(rq)->result;
    nvmet_req_complete(req, status);
    blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
                    blk_status_t blk_status)
{
    struct nvmet_req *req = rq->end_io_data;

    req->cqe->result = nvme_req(rq)->result;
    nvmet_req_complete(req, nvme_req(rq)->status);
    blk_mq_free_request(rq);
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
    struct scatterlist *sg;
    struct bio *bio;
    int i;

    if (req->sg_cnt > BIO_MAX_VECS)
        return -EINVAL;

    if (nvmet_use_inline_bvec(req)) {
        bio = &req->p.inline_bio;
        bio_init(bio, NULL, req->inline_bvec,
             ARRAY_SIZE(req->inline_bvec), req_op(rq));
    } else {
        bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
                GFP_KERNEL);
        bio->bi_end_io = bio_put;
    }

    for_each_sg(req->sg, sg, req->sg_cnt, i) {
        if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
                    sg->offset) < sg->length) {
            nvmet_req_bio_put(req, bio);
            return -EINVAL;
        }
    }

    blk_rq_bio_prep(rq, bio, req->sg_cnt);

    return 0;
}
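
/*
 * Mapping notes: small transfers reuse the bio and bvec array embedded in
 * the request (the nvmet_use_inline_bvec() path), avoiding a bio allocation
 * in the fast path; larger transfers allocate a bio sized to sg_cnt and let
 * bio_put() free it on completion. Because only a single bio is ever built,
 * requests needing more than BIO_MAX_VECS segments are rejected up front,
 * which matches the mdts clamp applied in nvmet_passthru_override_id_ctrl().
 */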

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
    struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
    struct request_queue *q = ctrl->admin_q;
    struct nvme_ns *ns = NULL;
    struct request *rq = NULL;
    unsigned int timeout;
    u32 effects;
    u16 status;
    int ret;

    if (likely(req->sq->qid != 0)) {
        u32 nsid = le32_to_cpu(req->cmd->common.nsid);

        ns = nvme_find_get_ns(ctrl, nsid);
        if (unlikely(!ns)) {
            pr_err("failed to get passthru ns nsid:%u\n", nsid);
            status = NVME_SC_INVALID_NS | NVME_SC_DNR;
            goto out;
        }

        q = ns->queue;
        timeout = nvmet_req_subsys(req)->io_timeout;
    } else {
        timeout = nvmet_req_subsys(req)->admin_timeout;
    }

    rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
    if (IS_ERR(rq)) {
        status = NVME_SC_INTERNAL;
        goto out_put_ns;
    }
    nvme_init_request(rq, req->cmd);

    if (timeout)
        rq->timeout = timeout;

    if (req->sg_cnt) {
        ret = nvmet_passthru_map_sg(req, rq);
        if (unlikely(ret)) {
            status = NVME_SC_INTERNAL;
            goto out_put_req;
        }
    }

    /*
     * If there are effects for the command we are about to execute, or an
     * end_req function, we need to call nvme_execute_passthru_rq()
     * synchronously in a work item, since the end_req function and
     * nvme_passthru_end() can't be called in the request done callback,
     * which is typically in interrupt context.
     */
    effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
    if (req->p.use_workqueue || effects) {
        INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
        req->p.rq = rq;
        queue_work(nvmet_wq, &req->p.work);
    } else {
        rq->end_io = nvmet_passthru_req_done;
        rq->end_io_data = req;
        blk_execute_rq_nowait(rq, false);
    }

    if (ns)
        nvme_put_ns(ns);

    return;

out_put_req:
    blk_mq_free_request(rq);
out_put_ns:
    if (ns)
        nvme_put_ns(ns);
out:
    nvmet_req_complete(req, status);
}
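
/*
 * Dispatch summary: admin commands (qid 0) are issued on the passthru
 * controller's admin_q with admin_timeout, while I/O commands resolve their
 * nsid to an nvme_ns and use that namespace's queue and io_timeout. Commands
 * with side effects, or those explicitly flagged with use_workqueue, are
 * completed synchronously from nvmet_wq via
 * nvmet_passthru_execute_cmd_work(); everything else is issued with
 * blk_execute_rq_nowait() and completed from nvmet_passthru_req_done() in
 * the block layer's completion path.
 */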

/*
 * We need to emulate the Set Features host behaviour support command to
 * ensure that any behaviour requested by the target's host matches the
 * behaviour already requested by the device's local host, and to fail the
 * command otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
    struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
    struct nvme_feat_host_behavior *host;
    u16 status = NVME_SC_INTERNAL;
    int ret;

    host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
    if (!host)
        goto out_complete_req;

    ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
                host, sizeof(*host), NULL);
    if (ret)
        goto out_free_host;

    status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
    if (status)
        goto out_free_host;

    if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
        pr_warn("target host has requested different behaviour from the local host\n");
        status = NVME_SC_INTERNAL;
    }

out_free_host:
    kfree(host);
out_complete_req:
    nvmet_req_complete(req, status);
}
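
/*
 * The single kzalloc(sizeof(*host) * 2) above carves out room for two
 * nvme_feat_host_behavior structures: host[0] is filled with the passthru
 * controller's current setting via nvme_get_features(), host[1] with what
 * the fabrics host asked for, copied from the command's SGL. A memcmp()
 * mismatch fails the command, since the setting cannot be applied per
 * fabrics host on the underlying device.
 */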

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
    req->p.use_workqueue = false;
    req->execute = nvmet_passthru_execute_cmd;
    return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
    /* Reject any commands with non-sgl flags set (i.e. fused commands) */
    if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
        return NVME_SC_INVALID_FIELD;

    switch (req->cmd->common.opcode) {
    case nvme_cmd_resv_register:
    case nvme_cmd_resv_report:
    case nvme_cmd_resv_acquire:
    case nvme_cmd_resv_release:
        /*
         * Reservations cannot be supported properly because the
         * underlying device has no way of differentiating the different
         * hosts that connect via fabrics. This could potentially be
         * emulated in the future if regular targets grow support for
         * this feature.
         */
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
    }

    return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list below
 * are passed down to the controller. This function implements the allow
 * list for both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
    switch (le32_to_cpu(req->cmd->features.fid)) {
    case NVME_FEAT_ARBITRATION:
    case NVME_FEAT_POWER_MGMT:
    case NVME_FEAT_LBA_RANGE:
    case NVME_FEAT_TEMP_THRESH:
    case NVME_FEAT_ERR_RECOVERY:
    case NVME_FEAT_VOLATILE_WC:
    case NVME_FEAT_WRITE_ATOMIC:
    case NVME_FEAT_AUTO_PST:
    case NVME_FEAT_TIMESTAMP:
    case NVME_FEAT_HCTM:
    case NVME_FEAT_NOPSC:
    case NVME_FEAT_RRL:
    case NVME_FEAT_PLM_CONFIG:
    case NVME_FEAT_PLM_WINDOW:
    case NVME_FEAT_HOST_BEHAVIOR:
    case NVME_FEAT_SANITIZE:
    case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
        return nvmet_setup_passthru_command(req);

    case NVME_FEAT_ASYNC_EVENT:
        /* There is no support for forwarding ASYNC events */
    case NVME_FEAT_IRQ_COALESCE:
    case NVME_FEAT_IRQ_CONFIG:
        /* The IRQ settings will not apply to the target controller */
    case NVME_FEAT_HOST_MEM_BUF:
        /*
         * Any HMB that's set will not be passed through and will
         * not work as expected
         */
    case NVME_FEAT_SW_PROGRESS:
        /*
         * The Pre-Boot Software Load Count doesn't make much
         * sense for a target to export
         */
    case NVME_FEAT_RESV_MASK:
    case NVME_FEAT_RESV_PERSIST:
        /* No reservations, see nvmet_parse_passthru_io_cmd() */
    default:
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
    }
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
    /* Reject any commands with non-sgl flags set (i.e. fused commands) */
    if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
        return NVME_SC_INVALID_FIELD;

    /*
     * Pass through all vendor specific commands
     */
    if (req->cmd->common.opcode >= nvme_admin_vendor_start)
        return nvmet_setup_passthru_command(req);

    switch (req->cmd->common.opcode) {
    case nvme_admin_async_event:
        req->execute = nvmet_execute_async_event;
        return NVME_SC_SUCCESS;
    case nvme_admin_keep_alive:
        /*
         * Most PCIe ctrls don't support the keep alive cmd, so we route
         * keep alive to the non-passthru mode. Change this code once
         * PCIe ctrls with keep alive support become available.
         */
        req->execute = nvmet_execute_keep_alive;
        return NVME_SC_SUCCESS;
    case nvme_admin_set_features:
        switch (le32_to_cpu(req->cmd->features.fid)) {
        case NVME_FEAT_ASYNC_EVENT:
        case NVME_FEAT_KATO:
        case NVME_FEAT_NUM_QUEUES:
        case NVME_FEAT_HOST_ID:
            req->execute = nvmet_execute_set_features;
            return NVME_SC_SUCCESS;
        case NVME_FEAT_HOST_BEHAVIOR:
            req->execute = nvmet_passthru_set_host_behaviour;
            return NVME_SC_SUCCESS;
        default:
            return nvmet_passthru_get_set_features(req);
        }
        break;
    case nvme_admin_get_features:
        switch (le32_to_cpu(req->cmd->features.fid)) {
        case NVME_FEAT_ASYNC_EVENT:
        case NVME_FEAT_KATO:
        case NVME_FEAT_NUM_QUEUES:
        case NVME_FEAT_HOST_ID:
            req->execute = nvmet_execute_get_features;
            return NVME_SC_SUCCESS;
        default:
            return nvmet_passthru_get_set_features(req);
        }
        break;
    case nvme_admin_identify:
        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_CTRL:
            req->execute = nvmet_passthru_execute_cmd;
            req->p.use_workqueue = true;
            return NVME_SC_SUCCESS;
        case NVME_ID_CNS_CS_CTRL:
            switch (req->cmd->identify.csi) {
            case NVME_CSI_ZNS:
                req->execute = nvmet_passthru_execute_cmd;
                req->p.use_workqueue = true;
                return NVME_SC_SUCCESS;
            }
            return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        case NVME_ID_CNS_NS:
            req->execute = nvmet_passthru_execute_cmd;
            req->p.use_workqueue = true;
            return NVME_SC_SUCCESS;
        case NVME_ID_CNS_CS_NS:
            switch (req->cmd->identify.csi) {
            case NVME_CSI_ZNS:
                req->execute = nvmet_passthru_execute_cmd;
                req->p.use_workqueue = true;
                return NVME_SC_SUCCESS;
            }
            return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        default:
            return nvmet_setup_passthru_command(req);
        }
    case nvme_admin_get_log_page:
        return nvmet_setup_passthru_command(req);
    default:
        /* Reject commands not in the allowlist above */
        return nvmet_report_invalid_opcode(req);
    }
}
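
/*
 * Note that the Identify Controller and Identify Namespace handlers above
 * (and their I/O command set variants) set use_workqueue = true: this forces
 * the command through nvmet_passthru_execute_cmd_work(), where the returned
 * data is fixed up by the nvmet_passthru_override_id_*() helpers before it
 * is copied back to the fabrics host. Commands simply allow-listed through
 * nvmet_setup_passthru_command() are passed down unmodified.
 */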

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
    struct nvme_ctrl *ctrl;
    struct file *file;
    int ret = -EINVAL;
    void *old;

    mutex_lock(&subsys->lock);
    if (!subsys->passthru_ctrl_path)
        goto out_unlock;
    if (subsys->passthru_ctrl)
        goto out_unlock;

    if (subsys->nr_namespaces) {
        pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
        goto out_unlock;
    }

    file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
    if (IS_ERR(file)) {
        ret = PTR_ERR(file);
        goto out_unlock;
    }

    ctrl = nvme_ctrl_from_file(file);
    if (!ctrl) {
        pr_err("failed to open nvme controller %s\n",
               subsys->passthru_ctrl_path);

        goto out_put_file;
    }

    old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
             subsys, GFP_KERNEL);
    if (xa_is_err(old)) {
        ret = xa_err(old);
        goto out_put_file;
    }

    if (old)
        goto out_put_file;

    subsys->passthru_ctrl = ctrl;
    subsys->ver = ctrl->vs;

    if (subsys->ver < NVME_VS(1, 2, 1)) {
        pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
            NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
            NVME_TERTIARY(subsys->ver));
        subsys->ver = NVME_VS(1, 2, 1);
    }
    nvme_get_ctrl(ctrl);
    __module_get(subsys->passthru_ctrl->ops->module);
    ret = 0;

out_put_file:
    filp_close(file, NULL);
out_unlock:
    mutex_unlock(&subsys->lock);
    return ret;
}
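
/*
 * This is normally reached from configfs. With the attribute names used in
 * drivers/nvme/target/configfs.c (device_path and enable under the
 * subsystem's passthru group), a typical setup looks roughly like:
 *
 *     echo -n /dev/nvme0 > \
 *         /sys/kernel/config/nvmet/subsystems/<nqn>/passthru/device_path
 *     echo 1 > /sys/kernel/config/nvmet/subsystems/<nqn>/passthru/enable
 *
 * The exact paths depend on the kernel version and configfs layout; check
 * the configfs code if they differ.
 */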

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
    if (subsys->passthru_ctrl) {
        xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
        module_put(subsys->passthru_ctrl->ops->module);
        nvme_put_ctrl(subsys->passthru_ctrl);
    }
    subsys->passthru_ctrl = NULL;
    subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
    mutex_lock(&subsys->lock);
    __nvmet_passthru_ctrl_disable(subsys);
    mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
    mutex_lock(&subsys->lock);
    __nvmet_passthru_ctrl_disable(subsys);
    mutex_unlock(&subsys->lock);
    kfree(subsys->passthru_ctrl_path);
}