// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

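/*
 * Generation counter for the discovery log page.  It is bumped (under
 * nvmet_config_sem) whenever discovery information changes, so hosts can
 * detect that a previously fetched log page has gone stale.
 */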
static u64 nvmet_genctr;

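/*
 * Queue a Discovery Log Page Change AEN to a single discovery controller,
 * unless that controller is not connected through @port or the host has
 * masked this event via the Asynchronous Event Configuration feature.
 */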
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

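/*
 * Notify all discovery controllers on @port that the discovery log has
 * changed.  If @subsys is non-NULL, only controllers whose host is allowed
 * to access that subsystem are notified.  Called with nvmet_config_sem held.
 */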
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

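/*
 * Queue a Discovery Log Page Change AEN on @port for every discovery
 * controller, or only for the one belonging to @host when @host is set.
 */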
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

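/*
 * Notify discovery controllers on every port that exports @subsys, e.g.
 * when a host is granted or revoked access to the subsystem.
 */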
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

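/*
 * Referral ports show up as additional entries in the discovery log page.
 * Enabling or disabling a referral is a discovery change for the parent
 * port, so signal it accordingly.
 */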
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

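/*
 * Fill discovery log page entry @numrec with the transport address of
 * @port and the given subsystem NQN.  The log buffer is zeroed on
 * allocation, so fixed-size string fields are implicitly NUL-padded.
 */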
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT).  Such a wildcard address is useless in
 * a discovery log entry, so a transport may implement disc_traddr to
 * report the address the connection actually arrived on instead.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

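/*
 * Count the discovery log entries visible to the connecting host: one for
 * the discovery subsystem itself, one per allowed subsystem on this port
 * and one per referral.  Called with nvmet_config_sem held.
 */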
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 1;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

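/*
 * Build the discovery log page under nvmet_config_sem so the entry count
 * cannot change between sizing and formatting the buffer, copy the
 * host-requested range out, and clear the discovery change AEN bit.
 */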
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;
	char traddr[NVMF_TRADDR_SIZE];

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If host provided data len is less than the header size, only the
	 * number of bytes requested by host will be sent to host.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	nvmet_set_disc_traddr(req, req->port, traddr);

	nvmet_format_discovery_entry(hdr, req->port,
				     nvmet_disc_subsys->subsysnqn,
				     traddr, NVME_NQN_CURR, numrec);
	numrec++;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_format_discovery_entry(hdr, req->port,
					     p->subsys->subsysnqn, traddr,
					     NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
					     NVME_DISC_SUBSYS_NAME,
					     r->disc_addr.traddr,
					     NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

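/*
 * Identify Controller for a discovery controller.  Only CNS value
 * NVME_ID_CNS_CTRL is valid here; the reported controller type is
 * NVME_CTRL_DISC and SGL support mirrors the transport capabilities.
 */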
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
		       strlen(ctrl->subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->cntrltype = NVME_CTRL_DISC;

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

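/*
 * Discovery controllers only implement the Keep Alive Timer and
 * Asynchronous Event Configuration features; reject everything else.
 */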
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

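/* Get Features counterpart of the above, for the same two features. */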
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

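/*
 * Admin command parser for discovery controllers.  Commands are refused
 * until the controller is enabled (CSTS.RDY set), and only the small
 * discovery subset of the admin command set is supported.
 */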
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

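/*
 * The discovery subsystem is allocated once at module load; discovery
 * controllers created by connecting hosts all attach to it.
 */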
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}