#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <nd-core.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>

#include "../watermark.h"
#include "nfit_test.h"
#include "ndtest.h"

enum {
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	NUM_INSTANCES = 2,
	NUM_DCR = 4,
	NDTEST_MAX_MAPPING = 6,
};

#define NDTEST_SCM_DIMM_CMD_MASK		\
	((1ul << ND_CMD_GET_CONFIG_SIZE) |	\
	 (1ul << ND_CMD_GET_CONFIG_DATA) |	\
	 (1ul << ND_CMD_SET_CONFIG_DATA) |	\
	 (1ul << ND_CMD_CALL))

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)	\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];
static struct class *ndtest_dimm_class;
static struct gen_pool *ndtest_pool;
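
/*
 * Static DIMM and region topology for the two test bus instances:
 * dimm_group1 backs the first bus with five 32M PMEM DIMMs, dimm_group2
 * backs the second bus with a single DIMM that reports a set of PAPR
 * health/error flags so the failure paths can be exercised.
 */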
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};

static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
};

static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 0 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 1 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};

static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}
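
/*
 * Label storage area emulation: config reads and writes are serviced
 * from the in-memory label_area buffer attached to each test DIMM.
 */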
static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	hdr->status = 0;
	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}

static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}

static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->status = 0;
	size->max_xfer = 8;
	size->config_size = dimm->config_size;

	return 0;
}
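
/*
 * ->ndctl() handler for the test bus: only the label configuration
 * commands advertised in NDTEST_SCM_DIMM_CMD_MASK are implemented here.
 */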
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;
	int _cmd_rc;

	if (!cmd_rc)
		cmd_rc = &_cmd_rc;

	*cmd_rc = 0;

	if (!nvdimm)
		return -EINVAL;

	dimm = nvdimm_provider_data(nvdimm);
	if (!dimm)
		return -EINVAL;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Failures for a DIMM can be injected through the fail_cmd and
	 * fail_cmd_code device attributes below.
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}

static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;
		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						     + resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
				   && (addr < (unsigned long) n->buf
				       + resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);
		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}

static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
			      resource_size(&res->res));
	vfree(res->buf);
	kfree(res);
}
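
/*
 * Back a test resource with vmalloc()ed memory and hand out a fake
 * physical address: DIMM-sized (or larger) allocations get an address
 * carved out of ndtest_pool, smaller ones simply reuse the buffer's
 * virtual address. The resource is torn down through a devm action.
 */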
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma = 0;
	void *buf;
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (!buf)
		goto buf_err;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return res->buf;

buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}

static ssize_t range_index_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};
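
/*
 * Build an nd_region_desc from the static region description: the
 * interleave-set cookies are derived from the first mapped DIMM's UUID
 * and each mapping is pointed at the corresponding test nvdimm.
 */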
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}

static int ndtest_init_regions(struct ndtest_priv *p)
{
	int i, ret = 0;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++) {
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
	}
}

static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);

static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};

static ssize_t phys_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x\n", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);

static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr = { .name = "handle", .mode = 0444 },
	.show = nvdimm_handle_show,
};

static ssize_t subsystem_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static ssize_t formats_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);

static ssize_t format_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);

static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	/* Only DIMMs with more than one format expose the format1 attribute */
	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}
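
/* Translate the PAPR health bits of a test DIMM into the papr/flags string. */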
static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);

static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};
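
/*
 * Create the nvdimm object for a test DIMM plus a companion
 * "test_dimm%d" class device that exposes the error-injection knobs
 * (fail_cmd, fail_cmd_code).
 */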
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1)
		set_bit(NDD_LABELING, &dimm_flags);

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				     ndtest_nvdimm_attribute_groups, dimm_flags,
				     NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(ndtest_dimm_class,
					      &priv->pdev.dev,
					      0, dimm, dimm_attribute_groups,
					      "test_dimm%d", id);
	if (IS_ERR(dimm->dev)) {
		int rc = PTR_ERR(dimm->dev);

		pr_err("Could not create dimm device attributes\n");
		dimm->dev = NULL;
		return rc;
	}

	return 0;
}
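
/*
 * Allocate the label area plus the backing, label and control resources
 * for every DIMM of this bus instance, then register the DIMMs.
 */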
static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id, rc;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		rc = ndtest_dimm_register(p, d, id);
		if (rc)
			return rc;
	}

	return 0;
}

static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};
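
/*
 * Register the nvdimm bus for one platform device instance. The fake
 * of_node/compatible attribute above is exported so the bus can be
 * identified as an "nvdimm_test" provider.
 */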
static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}

static int ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
	return 0;
}

static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				  sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				    sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				   sizeof(dma_addr_t), GFP_KERNEL);
	if (!p->dcr_dma || !p->label_dma || !p->dimm_dma) {
		rc = -ENOMEM;
		goto err;
	}

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}

static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};

static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}

static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	}

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	if (ndtest_dimm_class)
		class_destroy(ndtest_dimm_class);
}
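
/*
 * Module init: run the watermark checks, hook up the fake resource
 * lookup, create the shared gen_pool and dimm class, then register
 * NUM_INSTANCES platform devices plus the driver that probes them.
 */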
static __init int ndtest_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();

	nfit_test_setup(ndtest_resource_lookup, NULL);

	ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(ndtest_dimm_class)) {
		rc = PTR_ERR(ndtest_dimm_class);
		goto err_register;
	}

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("initialization failed\n");
	cleanup_devices();

	return rc;
}

static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");