0001
0002
0003
0004
0005
0006 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0007 #include <linux/kernel.h>
0008 #include <linux/module.h>
0009 #include <linux/slab.h>
0010 #include <linux/stat.h>
0011 #include <linux/ctype.h>
0012 #include <linux/pci.h>
0013 #include <linux/pci-p2pdma.h>
0014 #ifdef CONFIG_NVME_TARGET_AUTH
0015 #include <linux/nvme-auth.h>
0016 #endif
0017 #include <crypto/hash.h>
0018 #include <crypto/kpp.h>
0019
0020 #include "nvmet.h"
0021
/* Forward declarations: needed for type checks in the link/unlink callbacks. */
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

/* Global list of all configured ports; exported via the nvmet_ports pointer. */
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
0027
/* Maps an NVMe-oF on-the-wire type value to its configfs string name. */
struct nvmet_type_name_map {
	u8		type;	/* protocol constant (NVMF_*) */
	const char	*name;	/* string accepted/reported via configfs */
};
0032
/* Transport types that a port's addr_trtype attribute understands. */
static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};
0039
/*
 * Address families for addr_adrfam.  Entry 0 (pcie) is deliberately
 * skipped by the show/store handlers below, which iterate from index 1.
 */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
0048
0049 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
0050 {
0051 if (p->enabled)
0052 pr_err("Disable port '%u' before changing attribute in %s\n",
0053 le16_to_cpu(p->disc_addr.portid), caller);
0054 return p->enabled;
0055 }
0056
0057
0058
0059
0060
0061 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
0062 {
0063 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
0064 int i;
0065
0066 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
0067 if (nvmet_addr_family[i].type == adrfam)
0068 return snprintf(page, PAGE_SIZE, "%s\n",
0069 nvmet_addr_family[i].name);
0070 }
0071
0072 return snprintf(page, PAGE_SIZE, "\n");
0073 }
0074
0075 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
0076 const char *page, size_t count)
0077 {
0078 struct nvmet_port *port = to_nvmet_port(item);
0079 int i;
0080
0081 if (nvmet_is_port_enabled(port, __func__))
0082 return -EACCES;
0083
0084 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
0085 if (sysfs_streq(page, nvmet_addr_family[i].name))
0086 goto found;
0087 }
0088
0089 pr_err("Invalid value '%s' for adrfam\n", page);
0090 return -EINVAL;
0091
0092 found:
0093 port->disc_addr.adrfam = nvmet_addr_family[i].type;
0094 return count;
0095 }
0096
0097 CONFIGFS_ATTR(nvmet_, addr_adrfam);
0098
0099 static ssize_t nvmet_addr_portid_show(struct config_item *item,
0100 char *page)
0101 {
0102 __le16 portid = to_nvmet_port(item)->disc_addr.portid;
0103
0104 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
0105 }
0106
0107 static ssize_t nvmet_addr_portid_store(struct config_item *item,
0108 const char *page, size_t count)
0109 {
0110 struct nvmet_port *port = to_nvmet_port(item);
0111 u16 portid = 0;
0112
0113 if (kstrtou16(page, 0, &portid)) {
0114 pr_err("Invalid value '%s' for portid\n", page);
0115 return -EINVAL;
0116 }
0117
0118 if (nvmet_is_port_enabled(port, __func__))
0119 return -EACCES;
0120
0121 port->disc_addr.portid = cpu_to_le16(portid);
0122 return count;
0123 }
0124
0125 CONFIGFS_ATTR(nvmet_, addr_portid);
0126
0127 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
0128 char *page)
0129 {
0130 struct nvmet_port *port = to_nvmet_port(item);
0131
0132 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
0133 }
0134
0135 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
0136 const char *page, size_t count)
0137 {
0138 struct nvmet_port *port = to_nvmet_port(item);
0139
0140 if (count > NVMF_TRADDR_SIZE) {
0141 pr_err("Invalid value '%s' for traddr\n", page);
0142 return -EINVAL;
0143 }
0144
0145 if (nvmet_is_port_enabled(port, __func__))
0146 return -EACCES;
0147
0148 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
0149 return -EINVAL;
0150 return count;
0151 }
0152
0153 CONFIGFS_ATTR(nvmet_, addr_traddr);
0154
/* Secure-channel requirement settings for addr_treq. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
0160
0161 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
0162 {
0163 u8 treq = to_nvmet_port(item)->disc_addr.treq &
0164 NVME_TREQ_SECURE_CHANNEL_MASK;
0165 int i;
0166
0167 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
0168 if (treq == nvmet_addr_treq[i].type)
0169 return snprintf(page, PAGE_SIZE, "%s\n",
0170 nvmet_addr_treq[i].name);
0171 }
0172
0173 return snprintf(page, PAGE_SIZE, "\n");
0174 }
0175
0176 static ssize_t nvmet_addr_treq_store(struct config_item *item,
0177 const char *page, size_t count)
0178 {
0179 struct nvmet_port *port = to_nvmet_port(item);
0180 u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
0181 int i;
0182
0183 if (nvmet_is_port_enabled(port, __func__))
0184 return -EACCES;
0185
0186 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
0187 if (sysfs_streq(page, nvmet_addr_treq[i].name))
0188 goto found;
0189 }
0190
0191 pr_err("Invalid value '%s' for treq\n", page);
0192 return -EINVAL;
0193
0194 found:
0195 treq |= nvmet_addr_treq[i].type;
0196 port->disc_addr.treq = treq;
0197 return count;
0198 }
0199
0200 CONFIGFS_ATTR(nvmet_, addr_treq);
0201
0202 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
0203 char *page)
0204 {
0205 struct nvmet_port *port = to_nvmet_port(item);
0206
0207 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
0208 }
0209
0210 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
0211 const char *page, size_t count)
0212 {
0213 struct nvmet_port *port = to_nvmet_port(item);
0214
0215 if (count > NVMF_TRSVCID_SIZE) {
0216 pr_err("Invalid value '%s' for trsvcid\n", page);
0217 return -EINVAL;
0218 }
0219 if (nvmet_is_port_enabled(port, __func__))
0220 return -EACCES;
0221
0222 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
0223 return -EINVAL;
0224 return count;
0225 }
0226
0227 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
0228
0229 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
0230 char *page)
0231 {
0232 struct nvmet_port *port = to_nvmet_port(item);
0233
0234 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
0235 }
0236
0237 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
0238 const char *page, size_t count)
0239 {
0240 struct nvmet_port *port = to_nvmet_port(item);
0241 int ret;
0242
0243 if (nvmet_is_port_enabled(port, __func__))
0244 return -EACCES;
0245 ret = kstrtoint(page, 0, &port->inline_data_size);
0246 if (ret) {
0247 pr_err("Invalid value '%s' for inline_data_size\n", page);
0248 return -EINVAL;
0249 }
0250 return count;
0251 }
0252
0253 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
0254
0255 #ifdef CONFIG_BLK_DEV_INTEGRITY
0256 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
0257 char *page)
0258 {
0259 struct nvmet_port *port = to_nvmet_port(item);
0260
0261 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
0262 }
0263
0264 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
0265 const char *page, size_t count)
0266 {
0267 struct nvmet_port *port = to_nvmet_port(item);
0268 bool val;
0269
0270 if (strtobool(page, &val))
0271 return -EINVAL;
0272
0273 if (nvmet_is_port_enabled(port, __func__))
0274 return -EACCES;
0275
0276 port->pi_enable = val;
0277 return count;
0278 }
0279
0280 CONFIGFS_ATTR(nvmet_, param_pi_enable);
0281 #endif
0282
0283 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
0284 char *page)
0285 {
0286 struct nvmet_port *port = to_nvmet_port(item);
0287 int i;
0288
0289 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
0290 if (port->disc_addr.trtype == nvmet_transport[i].type)
0291 return snprintf(page, PAGE_SIZE,
0292 "%s\n", nvmet_transport[i].name);
0293 }
0294
0295 return sprintf(page, "\n");
0296 }
0297
0298 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
0299 {
0300 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
0301 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
0302 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
0303 }
0304
0305 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
0306 const char *page, size_t count)
0307 {
0308 struct nvmet_port *port = to_nvmet_port(item);
0309 int i;
0310
0311 if (nvmet_is_port_enabled(port, __func__))
0312 return -EACCES;
0313
0314 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
0315 if (sysfs_streq(page, nvmet_transport[i].name))
0316 goto found;
0317 }
0318
0319 pr_err("Invalid value '%s' for trtype\n", page);
0320 return -EINVAL;
0321
0322 found:
0323 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
0324 port->disc_addr.trtype = nvmet_transport[i].type;
0325 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
0326 nvmet_port_init_tsas_rdma(port);
0327 return count;
0328 }
0329
0330 CONFIGFS_ATTR(nvmet_, addr_trtype);
0331
0332
0333
0334
0335 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
0336 {
0337 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
0338 }
0339
/*
 * Set the backing device (or file) path for a namespace.  The path may
 * only be changed while the namespace is disabled; the subsystem mutex
 * serializes against enable/disable.
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* strip a trailing newline */
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	/* On allocation failure device_path ends up NULL, never dangling. */
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
0373
0374 #ifdef CONFIG_PCI_P2PDMA
0375 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
0376 {
0377 struct nvmet_ns *ns = to_nvmet_ns(item);
0378
0379 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
0380 }
0381
/*
 * Select a peer-to-peer memory device (or auto mode) for the namespace.
 * Only allowed while the namespace is disabled.  The old p2p_dev
 * reference is dropped after the new one (taken by
 * pci_p2pdma_enable_store()) is installed.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;	/* success path returns the full write size */
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);	/* release previous device, if any */
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
0414 #endif
0415
0416 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
0417 {
0418 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
0419 }
0420
0421 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
0422 const char *page, size_t count)
0423 {
0424 struct nvmet_ns *ns = to_nvmet_ns(item);
0425 struct nvmet_subsys *subsys = ns->subsys;
0426 int ret = 0;
0427
0428 mutex_lock(&subsys->lock);
0429 if (ns->enabled) {
0430 ret = -EBUSY;
0431 goto out_unlock;
0432 }
0433
0434 if (uuid_parse(page, &ns->uuid))
0435 ret = -EINVAL;
0436
0437 out_unlock:
0438 mutex_unlock(&subsys->lock);
0439 return ret ? ret : count;
0440 }
0441
0442 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
0443
0444 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
0445 {
0446 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
0447 }
0448
/*
 * Parse a 16-byte NGUID written as 32 hex digits, with optional '-' or
 * ':' separators between byte pairs.  The value is staged in a local
 * buffer and committed only if the whole string parses, so a partial
 * parse never corrupts the stored NGUID.  Only allowed while disabled.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* need two hex digits left in the buffer for this byte */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* skip a single optional separator between byte pairs */
		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
0489
0490 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
0491 {
0492 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
0493 }
0494
/*
 * Move the namespace into another ANA group.  The per-group usage
 * counters are updated under nvmet_ana_sem; the new group's count is
 * bumped before the old one is dropped so the set of enabled groups
 * never transiently appears empty.  An ANA change AEN is sent after
 * the update.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	/* group IDs are 1-based */
	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
0522
0523 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
0524 {
0525 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
0526 }
0527
0528 static ssize_t nvmet_ns_enable_store(struct config_item *item,
0529 const char *page, size_t count)
0530 {
0531 struct nvmet_ns *ns = to_nvmet_ns(item);
0532 bool enable;
0533 int ret = 0;
0534
0535 if (strtobool(page, &enable))
0536 return -EINVAL;
0537
0538 if (enable)
0539 ret = nvmet_ns_enable(ns);
0540 else
0541 nvmet_ns_disable(ns);
0542
0543 return ret ? ret : count;
0544 }
0545
0546 CONFIGFS_ATTR(nvmet_ns_, enable);
0547
0548 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
0549 {
0550 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
0551 }
0552
0553 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
0554 const char *page, size_t count)
0555 {
0556 struct nvmet_ns *ns = to_nvmet_ns(item);
0557 bool val;
0558
0559 if (strtobool(page, &val))
0560 return -EINVAL;
0561
0562 mutex_lock(&ns->subsys->lock);
0563 if (ns->enabled) {
0564 pr_err("disable ns before setting buffered_io value.\n");
0565 mutex_unlock(&ns->subsys->lock);
0566 return -EINVAL;
0567 }
0568
0569 ns->buffered_io = val;
0570 mutex_unlock(&ns->subsys->lock);
0571 return count;
0572 }
0573
0574 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
0575
0576 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
0577 const char *page, size_t count)
0578 {
0579 struct nvmet_ns *ns = to_nvmet_ns(item);
0580 bool val;
0581
0582 if (strtobool(page, &val))
0583 return -EINVAL;
0584
0585 if (!val)
0586 return -EINVAL;
0587
0588 mutex_lock(&ns->subsys->lock);
0589 if (!ns->enabled) {
0590 pr_err("enable ns before revalidate.\n");
0591 mutex_unlock(&ns->subsys->lock);
0592 return -EINVAL;
0593 }
0594 if (nvmet_ns_revalidate(ns))
0595 nvmet_ns_changed(ns->subsys, ns->nsid);
0596 mutex_unlock(&ns->subsys->lock);
0597 return count;
0598 }
0599
0600 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
0601
/* Attributes exposed in each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
0615
/* configfs ->release: free the namespace when its item refcount drops. */
static void nvmet_ns_release(struct config_item *item)
{
	nvmet_ns_free(to_nvmet_ns(item));
}
0622
/* Item operations and type for a single namespace directory. */
static struct configfs_item_operations nvmet_ns_item_ops = {
	.release	= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops	= &nvmet_ns_item_ops,
	.ct_attrs	= nvmet_ns_attrs,
	.ct_owner	= THIS_MODULE,
};
0632
/*
 * mkdir handler for the namespaces directory: the directory name is the
 * NSID.  NSID 0 and NVME_NSID_ALL (0xffffffff) are reserved and rejected.
 */
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}
0663
/* Group operations and type for the per-subsystem namespaces directory. */
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
0672
0673 #ifdef CONFIG_NVME_TARGET_PASSTHRU
0674
0675 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
0676 char *page)
0677 {
0678 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0679
0680 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
0681 }
0682
/*
 * Set the path of the NVMe controller to pass commands through to.
 * Only allowed while passthru is not enabled; the subsystem mutex
 * serializes against enable/disable.
 */
static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* strip a trailing newline */
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	/* On allocation failure the path ends up NULL, never dangling. */
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);
0715
0716 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
0717 char *page)
0718 {
0719 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0720
0721 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
0722 }
0723
0724 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
0725 const char *page, size_t count)
0726 {
0727 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0728 bool enable;
0729 int ret = 0;
0730
0731 if (strtobool(page, &enable))
0732 return -EINVAL;
0733
0734 if (enable)
0735 ret = nvmet_passthru_ctrl_enable(subsys);
0736 else
0737 nvmet_passthru_ctrl_disable(subsys);
0738
0739 return ret ? ret : count;
0740 }
0741 CONFIGFS_ATTR(nvmet_passthru_, enable);
0742
0743 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
0744 char *page)
0745 {
0746 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
0747 }
0748
0749 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
0750 const char *page, size_t count)
0751 {
0752 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0753 unsigned int timeout;
0754
0755 if (kstrtouint(page, 0, &timeout))
0756 return -EINVAL;
0757 subsys->admin_timeout = timeout;
0758 return count;
0759 }
0760 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
0761
0762 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
0763 char *page)
0764 {
0765 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
0766 }
0767
0768 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
0769 const char *page, size_t count)
0770 {
0771 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0772 unsigned int timeout;
0773
0774 if (kstrtouint(page, 0, &timeout))
0775 return -EINVAL;
0776 subsys->io_timeout = timeout;
0777 return count;
0778 }
0779 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
0780
0781 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
0782 char *page)
0783 {
0784 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
0785 }
0786
0787 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
0788 const char *page, size_t count)
0789 {
0790 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
0791 unsigned int clear_ids;
0792
0793 if (kstrtouint(page, 0, &clear_ids))
0794 return -EINVAL;
0795 subsys->clear_ids = clear_ids;
0796 return count;
0797 }
0798 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
0799
/* Attributes and type for the per-subsystem passthru directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs	= nvmet_passthru_attrs,
	.ct_owner	= THIS_MODULE,
};
0813
0814 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
0815 {
0816 config_group_init_type_name(&subsys->passthru_group,
0817 "passthru", &nvmet_passthru_type);
0818 configfs_add_default_group(&subsys->passthru_group,
0819 &subsys->group);
0820 }
0821
0822 #else
0823
/* No-op stub when CONFIG_NVME_TARGET_PASSTHRU is disabled. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}
0827
0828 #endif
0829
/*
 * Symlink a subsystem into a port's subsystems directory, exposing the
 * subsystem on that port.  The first link on a port also enables the
 * port (so enablement only happens once there is something to serve).
 * Duplicate links are rejected with -EEXIST.  All list manipulation is
 * done under nvmet_config_sem; the link is allocated beforehand so no
 * allocation happens with the semaphore held.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	/* only subsystem items may be linked here */
	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* enable the port when its first subsystem is linked */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
0872
/*
 * Remove a subsystem symlink from a port: tear down any controllers
 * that subsystem has on the port, signal a discovery change, and
 * disable the port when its last subsystem goes away.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}
0898
/* Item operations and type for a port's subsystems directory. */
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link	= nvmet_port_subsys_allow_link,
	.drop_link	= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops	= &nvmet_port_subsys_item_ops,
	.ct_owner	= THIS_MODULE,
};
0908
/*
 * Symlink a host into a subsystem's allowed_hosts directory.  Rejected
 * while allow_any_host is set (the two access models are mutually
 * exclusive) and for duplicate host NQNs.  List updates happen under
 * nvmet_config_sem; the link is allocated before taking the semaphore.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	/* only host items may be linked here */
	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
0950
/*
 * Remove a host from a subsystem's allowed_hosts directory and signal
 * a discovery change.  Hosts are matched by NQN string comparison.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}
0973
/* Item operations and type for a subsystem's allowed_hosts directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link	= nvmet_allowed_hosts_allow_link,
	.drop_link	= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops	= &nvmet_allowed_hosts_item_ops,
	.ct_owner	= THIS_MODULE,
};
0983
0984 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
0985 char *page)
0986 {
0987 return snprintf(page, PAGE_SIZE, "%d\n",
0988 to_subsys(item)->allow_any_host);
0989 }
0990
/*
 * Toggle allow_any_host.  Setting it is rejected while explicit hosts
 * are linked (the two access models are mutually exclusive).  A
 * discovery change is only signalled when the value actually changes.
 */
static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (strtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1019
1020 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1021 char *page)
1022 {
1023 struct nvmet_subsys *subsys = to_subsys(item);
1024
1025 if (NVME_TERTIARY(subsys->ver))
1026 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1027 NVME_MAJOR(subsys->ver),
1028 NVME_MINOR(subsys->ver),
1029 NVME_TERTIARY(subsys->ver));
1030
1031 return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1032 NVME_MAJOR(subsys->ver),
1033 NVME_MINOR(subsys->ver));
1034 }
1035
/*
 * Set the subsystem's NVMe spec version from "major.minor" or
 * "major.minor.tertiary".  Not allowed once the subsystem has been
 * discovered by a host, nor for passthru subsystems (which inherit the
 * version from the underlying controller).  Caller holds both
 * nvmet_config_sem and subsys->lock.
 */
static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}
1068
/*
 * Locking wrapper for the version store: nvmet_config_sem is taken
 * before subsys->lock, matching the ordering used elsewhere.
 */
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1084
1085
/* True for printable ASCII, i.e. space (0x20) through tilde (0x7e). */
static bool nvmet_is_ascii(const char c)
{
	return !(c < 0x20 || c > 0x7e);
}
1090
1091 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1092 char *page)
1093 {
1094 struct nvmet_subsys *subsys = to_subsys(item);
1095
1096 return snprintf(page, PAGE_SIZE, "%.*s\n",
1097 NVMET_SN_MAX_SIZE, subsys->serial);
1098 }
1099
/*
 * Set the subsystem serial number.  Rejected once a host has discovered
 * the subsystem.  Input must be 1..NVMET_SN_MAX_SIZE printable ASCII
 * characters; it is space-padded into the fixed-size field.  Caller
 * holds nvmet_config_sem and subsys->lock.
 */
static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");	/* ignore a trailing newline */

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII strings\n");
			return -EINVAL;
		}
	}

	/* NVMe serial numbers are fixed-width, padded with spaces */
	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}
1129
/*
 * Locking wrapper for the serial store: nvmet_config_sem is taken
 * before subsys->lock, matching the ordering used elsewhere.
 */
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1145
1146 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1147 char *page)
1148 {
1149 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1150 }
1151
1152 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1153 const char *page, size_t cnt)
1154 {
1155 u16 cntlid_min;
1156
1157 if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1158 return -EINVAL;
1159
1160 if (cntlid_min == 0)
1161 return -EINVAL;
1162
1163 down_write(&nvmet_config_sem);
1164 if (cntlid_min >= to_subsys(item)->cntlid_max)
1165 goto out_unlock;
1166 to_subsys(item)->cntlid_min = cntlid_min;
1167 up_write(&nvmet_config_sem);
1168 return cnt;
1169
1170 out_unlock:
1171 up_write(&nvmet_config_sem);
1172 return -EINVAL;
1173 }
1174 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1175
1176 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1177 char *page)
1178 {
1179 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1180 }
1181
1182 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1183 const char *page, size_t cnt)
1184 {
1185 u16 cntlid_max;
1186
1187 if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1188 return -EINVAL;
1189
1190 if (cntlid_max == 0)
1191 return -EINVAL;
1192
1193 down_write(&nvmet_config_sem);
1194 if (cntlid_max <= to_subsys(item)->cntlid_min)
1195 goto out_unlock;
1196 to_subsys(item)->cntlid_max = cntlid_max;
1197 up_write(&nvmet_config_sem);
1198 return cnt;
1199
1200 out_unlock:
1201 up_write(&nvmet_config_sem);
1202 return -EINVAL;
1203 }
1204 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1205
1206 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1207 char *page)
1208 {
1209 struct nvmet_subsys *subsys = to_subsys(item);
1210
1211 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1212 }
1213
1214 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1215 const char *page, size_t count)
1216 {
1217 int pos = 0, len;
1218
1219 if (subsys->subsys_discovered) {
1220 pr_err("Can't set model number. %s is already assigned\n",
1221 subsys->model_number);
1222 return -EINVAL;
1223 }
1224
1225 len = strcspn(page, "\n");
1226 if (!len)
1227 return -EINVAL;
1228
1229 if (len > NVMET_MN_MAX_SIZE) {
1230 pr_err("Model number size can not exceed %d Bytes\n",
1231 NVMET_MN_MAX_SIZE);
1232 return -EINVAL;
1233 }
1234
1235 for (pos = 0; pos < len; pos++) {
1236 if (!nvmet_is_ascii(page[pos]))
1237 return -EINVAL;
1238 }
1239
1240 subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
1241 if (!subsys->model_number)
1242 return -ENOMEM;
1243 return count;
1244 }
1245
/*
 * Write handler for the subsystem "attr_model" configfs attribute.
 * Takes nvmet_config_sem and the per-subsystem lock in that order and
 * delegates validation/update to the _locked helper.
 */
static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1261
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Report whether end-to-end protection information (PI) is enabled. */
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
						char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/*
 * Enable/disable protection information support for the subsystem.
 * Accepts the usual boolean spellings (0/1, y/n, on/off).
 */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (strtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
1283
/* Attributes exposed in each /sys/kernel/config/nvmet/subsystems/<nqn>/ dir. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
1296
1297
1298
1299
/*
 * configfs release for a subsystem directory: tear down all controllers
 * attached to the subsystem, then drop the configfs reference.
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}
1307
static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

/* Item type for a single subsystem directory. */
static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
1317
/*
 * mkdir handler for /sys/kernel/config/nvmet/subsystems: allocates a new
 * NVMe subsystem named after the directory (the subsystem NQN) and
 * pre-populates its "namespaces" and "allowed_hosts" default groups.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	/* The well-known discovery subsystem is managed by the core. */
	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}
1347
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

/* Group type for the top-level "subsystems" directory. */
static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
1356
1357 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1358 char *page)
1359 {
1360 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1361 }
1362
1363 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1364 const char *page, size_t count)
1365 {
1366 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1367 struct nvmet_port *port = to_nvmet_port(item);
1368 bool enable;
1369
1370 if (strtobool(page, &enable))
1371 goto inval;
1372
1373 if (enable)
1374 nvmet_referral_enable(parent, port);
1375 else
1376 nvmet_referral_disable(parent, port);
1377
1378 return count;
1379 inval:
1380 pr_err("Invalid value '%s' for enable\n", page);
1381 return -EINVAL;
1382 }
1383
1384 CONFIGFS_ATTR(nvmet_referral_, enable);
1385
1386
1387
1388
/* Attributes exposed in each referral directory (shared addr_* handlers). */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
1399
/*
 * rmdir notification for a referral: disable it on the parent port
 * before the configfs item goes away.
 */
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

/* Final release of the referral's backing nvmet_port allocation. */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}
1415
static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

/* Item type for a single referral directory. */
static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
1425
1426 static struct config_group *nvmet_referral_make(
1427 struct config_group *group, const char *name)
1428 {
1429 struct nvmet_port *port;
1430
1431 port = kzalloc(sizeof(*port), GFP_KERNEL);
1432 if (!port)
1433 return ERR_PTR(-ENOMEM);
1434
1435 INIT_LIST_HEAD(&port->entry);
1436 config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1437
1438 return &port->group;
1439 }
1440
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

/* Group type for a port's "referrals" directory. */
static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
1450
/* Mapping between ANA state values and their configfs spellings. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
1458
1459 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1460 char *page)
1461 {
1462 struct nvmet_ana_group *grp = to_ana_group(item);
1463 enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1464 int i;
1465
1466 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1467 if (state == nvmet_ana_state[i].type)
1468 return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1469 }
1470
1471 return sprintf(page, "\n");
1472 }
1473
1474 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1475 const char *page, size_t count)
1476 {
1477 struct nvmet_ana_group *grp = to_ana_group(item);
1478 enum nvme_ana_state *ana_state = grp->port->ana_state;
1479 int i;
1480
1481 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1482 if (sysfs_streq(page, nvmet_ana_state[i].name))
1483 goto found;
1484 }
1485
1486 pr_err("Invalid value '%s' for ana_state\n", page);
1487 return -EINVAL;
1488
1489 found:
1490 down_write(&nvmet_ana_sem);
1491 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1492 nvmet_ana_chgcnt++;
1493 up_write(&nvmet_ana_sem);
1494 nvmet_port_send_ana_event(grp->port);
1495 return count;
1496 }
1497
1498 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1499
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

/*
 * configfs release for an ANA group directory.  The default group is
 * embedded in the port and must not be freed or disabled here; for all
 * other groups the state reverts to inaccessible, the group is marked
 * disabled, and hosts are notified before the memory is released.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}
1520
static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

/* Item type for a single ANA group directory. */
static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
1530
1531 static struct config_group *nvmet_ana_groups_make_group(
1532 struct config_group *group, const char *name)
1533 {
1534 struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1535 struct nvmet_ana_group *grp;
1536 u32 grpid;
1537 int ret;
1538
1539 ret = kstrtou32(name, 0, &grpid);
1540 if (ret)
1541 goto out;
1542
1543 ret = -EINVAL;
1544 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1545 goto out;
1546
1547 ret = -ENOMEM;
1548 grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1549 if (!grp)
1550 goto out;
1551 grp->port = port;
1552 grp->grpid = grpid;
1553
1554 down_write(&nvmet_ana_sem);
1555 nvmet_ana_group_enabled[grpid]++;
1556 up_write(&nvmet_ana_sem);
1557
1558 nvmet_port_send_ana_event(grp->port);
1559
1560 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1561 return &grp->group;
1562 out:
1563 return ERR_PTR(ret);
1564 }
1565
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

/* Group type for a port's "ana_groups" directory. */
static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
1574
1575
1576
1577
/*
 * configfs release for a port directory: wait for any queued work
 * referencing the port to finish, unlink it from the global port list,
 * and free it together with its ANA state array.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let any pending work touching this port finish first. */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}
1589
/*
 * Attributes exposed in each port directory.  Note there is no portid
 * attribute: the port ID is taken from the directory name.
 */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

/* Item type for a single port directory. */
static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
1612
1613 static struct config_group *nvmet_ports_make(struct config_group *group,
1614 const char *name)
1615 {
1616 struct nvmet_port *port;
1617 u16 portid;
1618 u32 i;
1619
1620 if (kstrtou16(name, 0, &portid))
1621 return ERR_PTR(-EINVAL);
1622
1623 port = kzalloc(sizeof(*port), GFP_KERNEL);
1624 if (!port)
1625 return ERR_PTR(-ENOMEM);
1626
1627 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1628 sizeof(*port->ana_state), GFP_KERNEL);
1629 if (!port->ana_state) {
1630 kfree(port);
1631 return ERR_PTR(-ENOMEM);
1632 }
1633
1634 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1635 if (i == NVMET_DEFAULT_ANA_GRPID)
1636 port->ana_state[1] = NVME_ANA_OPTIMIZED;
1637 else
1638 port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1639 }
1640
1641 list_add(&port->global_entry, &nvmet_ports_list);
1642
1643 INIT_LIST_HEAD(&port->entry);
1644 INIT_LIST_HEAD(&port->subsystems);
1645 INIT_LIST_HEAD(&port->referrals);
1646 port->inline_data_size = -1;
1647
1648 port->disc_addr.portid = cpu_to_le16(portid);
1649 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1650 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1651 config_group_init_type_name(&port->group, name, &nvmet_port_type);
1652
1653 config_group_init_type_name(&port->subsys_group,
1654 "subsystems", &nvmet_port_subsys_type);
1655 configfs_add_default_group(&port->subsys_group, &port->group);
1656
1657 config_group_init_type_name(&port->referrals_group,
1658 "referrals", &nvmet_referrals_type);
1659 configfs_add_default_group(&port->referrals_group, &port->group);
1660
1661 config_group_init_type_name(&port->ana_groups_group,
1662 "ana_groups", &nvmet_ana_groups_type);
1663 configfs_add_default_group(&port->ana_groups_group, &port->group);
1664
1665 port->ana_default_group.port = port;
1666 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1667 config_group_init_type_name(&port->ana_default_group.group,
1668 __stringify(NVMET_DEFAULT_ANA_GRPID),
1669 &nvmet_ana_group_type);
1670 configfs_add_default_group(&port->ana_default_group.group,
1671 &port->ana_groups_group);
1672
1673 return &port->group;
1674 }
1675
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

/* Group type for the top-level "ports" directory. */
static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Top-level default groups, registered from nvmet_init_configfs(). */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
1687
1688 #ifdef CONFIG_NVME_TARGET_AUTH
1689 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1690 char *page)
1691 {
1692 u8 *dhchap_secret = to_host(item)->dhchap_secret;
1693
1694 if (!dhchap_secret)
1695 return sprintf(page, "\n");
1696 return sprintf(page, "%s\n", dhchap_secret);
1697 }
1698
/*
 * Set the host's DH-HMAC-CHAP secret.  Validation and storage are done
 * by nvmet_auth_set_key().
 */
static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * NOTE(review): no controller teardown here -- existing sessions
	 * presumably stay authenticated with the old key until the host
	 * re-authenticates; confirm against nvmet_auth_set_key().
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1715
1716 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1717 char *page)
1718 {
1719 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1720
1721 if (!dhchap_secret)
1722 return sprintf(page, "\n");
1723 return sprintf(page, "%s\n", dhchap_secret);
1724 }
1725
/*
 * Set the controller-side DH-HMAC-CHAP secret (for bidirectional
 * authentication).  Validation and storage are done by
 * nvmet_auth_set_key().
 */
static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * NOTE(review): as with dhchap_key_store, no teardown of existing
	 * sessions happens here -- confirm intended semantics.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
1742
1743 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
1744 char *page)
1745 {
1746 struct nvmet_host *host = to_host(item);
1747 const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
1748
1749 return sprintf(page, "%s\n", hash_name ? hash_name : "none");
1750 }
1751
1752 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
1753 const char *page, size_t count)
1754 {
1755 struct nvmet_host *host = to_host(item);
1756 u8 hmac_id;
1757
1758 hmac_id = nvme_auth_hmac_id(page);
1759 if (hmac_id == NVME_AUTH_HASH_INVALID)
1760 return -EINVAL;
1761 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
1762 return -ENOTSUPP;
1763 host->dhchap_hash_id = hmac_id;
1764 return count;
1765 }
1766
1767 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
1768
1769 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
1770 char *page)
1771 {
1772 struct nvmet_host *host = to_host(item);
1773 const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
1774
1775 return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
1776 }
1777
1778 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
1779 const char *page, size_t count)
1780 {
1781 struct nvmet_host *host = to_host(item);
1782 int dhgroup_id;
1783
1784 dhgroup_id = nvme_auth_dhgroup_id(page);
1785 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
1786 return -EINVAL;
1787 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
1788 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
1789
1790 if (!crypto_has_kpp(kpp, 0, 0))
1791 return -EINVAL;
1792 }
1793 host->dhchap_dhgroup_id = dhgroup_id;
1794 return count;
1795 }
1796
1797 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
1798
/* Authentication attributes exposed in each hosts/<hostnqn>/ directory. */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif
1807
/*
 * configfs release for a host directory: free the authentication
 * secrets and the host itself.
 *
 * Fix: also free dhchap_ctrl_secret -- it is allocated by
 * nvmet_host_dhchap_ctrl_key_store() (via nvmet_auth_set_key(..., true))
 * and was previously leaked on host removal.
 */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}
1817
static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

/* Item type for a single host directory; auth attrs only when enabled. */
static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
1829
/*
 * mkdir handler for /sys/kernel/config/nvmet/hosts: allocates a host
 * entry named after the directory (the host NQN).
 */
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default authentication hash is SHA-256. */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}
1848
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

/* Group type for the top-level "hosts" directory. */
static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

/* Root "nvmet" directory has no attributes of its own. */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

/* The /sys/kernel/config/nvmet configfs subsystem. */
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
1872
/*
 * Module init: build the "subsystems", "ports" and "hosts" default
 * groups under the nvmet root and register the configfs subsystem.
 * Returns 0 on success or the configfs registration error.
 */
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
1903
/* Module exit: unregister the nvmet configfs subsystem. */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}