// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

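/*
 * Expected data transfer length for a Get Features command: of the
 * features implemented here only the Host Identifier carries a data
 * payload; everything else returns its result in the completion dword.
 */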
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

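/* Satisfy a Get Log Page request by zero-filling the whole transfer. */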
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

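/*
 * Error Information log page: walk the error slot ring backwards from
 * the current position so that the most recent entries are returned
 * first.
 */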
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

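/*
 * SMART data for a single namespace, derived from the block layer I/O
 * statistics.  Per the NVMe spec the data units fields count thousands
 * of 512 byte units, which matches part_stat sector granularity.
 */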
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

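/* Controller-wide SMART data: accumulate the stats over all namespaces. */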
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

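/*
 * Commands Supported and Effects log: bit 0 (CSUPP) marks a command as
 * supported; no other effects are reported for any of these opcodes.
 */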
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

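/*
 * Changed Namespace List log.  nr_changed_ns == U32_MAX means the list
 * overflowed; in that case the first entry has been set to NVME_NSID_ALL
 * and only that single entry is returned.  Reading the log clears it and
 * rearms the namespace attribute AEN.
 */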
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

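/*
 * Fill one ANA group descriptor.  With the RGO (Return Groups Only) bit
 * set in LSP the NSID list is omitted and only the per-group header is
 * returned.
 */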
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * lie about it.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

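/*
 * Identify Namespace (CNS 00h).  NVME_NSID_ALL is rejected here, and an
 * inactive NSID returns an all-zeroed structure rather than an error.
 */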
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

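/*
 * Active Namespace ID list (CNS 02h): report up to one page of NSIDs
 * greater than the NSID given in the command, in increasing order (the
 * namespaces xarray is keyed by NSID, so iteration is already sorted).
 */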
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

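/*
 * Emit a single Namespace Identification Descriptor: a (type, length)
 * header followed by the identifier itself, advancing *off past both.
 */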
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

out:
	nvmet_req_complete(req, status);
}

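/*
 * The descriptor list is supported for the NVM command set always, and
 * for ZNS only when zoned block device support is compiled in.
 */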
static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
{
	switch (req->cmd->identify.csi) {
	case NVME_CSI_NVM:
		nvmet_execute_identify_desclist(req);
		return true;
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			nvmet_execute_identify_desclist(req);
			return true;
		}
		return false;
	default:
		return false;
	}
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ns(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_CS_NS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ns(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_ctrl(req);
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				return nvmet_execute_identify_cns_cs_ctrl(req);
			default:
				break;
			}
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			return nvmet_execute_identify_nslist(req);
		default:
			break;
		}
		break;
	case NVME_ID_CNS_NS_DESC_LIST:
		if (nvmet_handle_identify_desclist(req))
			return;
		break;
	}

	nvmet_req_cns_error_complete(req);
}

/*
 * Abort is mandatory in the spec, but we don't actually abort anything
 * here: set dword 0 of the completion to 1 ("command not aborted") and
 * finish the command successfully.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

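/*
 * KATO in cdw11 is specified in milliseconds; ctrl->kato is kept in
 * seconds, rounded up, and the keep-alive timer is restarted with the
 * new value.
 */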
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

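/*
 * Only AEN bits covered by @mask may be enabled by the host; anything
 * else is rejected as an invalid field.
 */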
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * implement them in the future, but return an invalid field
	 * for now:
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}