#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds the hi-lo vs lo-hi split order
 * is irrelevant here, so pick the hi-lo accessors.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                /* reuse an existing mapping when the page is already mapped */
                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                        nvdimm_bus_unlock(&nd_region->dev);
                        return -EBUSY;
                }

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out entries that are duplicates. This should prevent the
         * extra flushings.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}

static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        if (!test_bit(ND_REGION_CXL, &nd_region->flags))
                memregion_free(nd_region->id);
        kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
        if (!nd_region)
                return NULL;
        return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

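/*
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This value backs the region's 'nstype' attribute and selects the
 * namespace personality for the region: labeled PMEM namespaces when any
 * mapped DIMM supports labels, otherwise raw (label-less) IO namespaces.
 */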
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, label;

                for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_LABELING, &nvdimm->flags))
                                label++;
                }
                if (label)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static unsigned long long region_size(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                return nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                return nd_mapping->size;
        }

        return 0;
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        rc = nvdimm_flush(nd_region, NULL);
        if (rc)
                return rc;

        return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible() */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t available;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

        available = 0;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                available += nd_pmem_available_dpa(nd_region, nd_mapping);
        }

        return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
        resource_size_t avail = 0;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa(
                                nd_region, nd_mapping));
        }
        return avail * nd_region->ndr_mappings;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size. This value is potentially stale as soon as
         * nvdimm_bus_lock() is dropped, but it is userspace's problem
         * to not race itself.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_allocatable_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static int revalidate_read_only(struct device *dev, void *data)
{
        nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
        return 0;
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        device_for_each_child(dev, NULL, revalidate_read_only);
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long val, dpa;
        u32 remainder;
        int rc;

        rc = kstrtoul(buf, 0, &val);
        if (rc)
                return rc;

        if (!nd_region->ndr_mappings)
                return -ENXIO;

        /*
         * Ensure the requested alignment is evenly divisible by the
         * region interleave-width, is a power-of-2 multiple of
         * PAGE_SIZE per mapping, and does not exceed the region size.
         */
        dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
        if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
                        || val > region_size(nd_region) || remainder)
                return -EINVAL;

        /*
         * Given that space allocation consults this value multiple
         * times, ensure it does not change for the duration of the
         * allocation.
         */
        nvdimm_bus_lock(dev);
        nd_region->align = val;
        nvdimm_bus_unlock(dev);

        return len;
}
static DEVICE_ATTR_RW(align);

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver)
                rc = badblocks_show(&nd_region->bb, buf, 0);
        else
                rc = -ENXIO;
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
                return sprintf(buf, "cpu_cache\n");
        else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
                return sprintf(buf, "memory_controller\n");
        else
                return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_align.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_max_available_extent.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        &dev_attr_persistence_domain.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (a == &dev_attr_resource.attr && !is_memory(dev))
                return 0;

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a == &dev_attr_persistence_domain.attr) {
                if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
                                        | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
                        return 0;
                return a->mode;
        }

        if (a == &dev_attr_align.attr)
                return a->mode;

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if (type == ND_DEVICE_NAMESPACE_PMEM &&
            a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size,
                        nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,          \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        return mappingN(dev, buf, idx);                         \
}                                                               \
static DEVICE_ATTR_RO(mapping##idx)

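/*
 * 32 mapping attributes should be enough for a while; even with socket
 * interleave, a 32-way interleave set is a degenerate case.
 */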
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_region_attribute_group,
        &nd_numa_attribute_group,
        &nd_mapping_attribute_group,
        NULL,
};

static const struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
        .groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
        .groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

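/*
 * When a seed device is successfully probed, advance the region's seeds so
 * userspace always has an idle candidate device for the next namespace,
 * btt, pfn, or dax configuration.
 */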
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed == dev) {
                nd_region_create_ns_seed(nd_region);
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        } else if (is_nd_dax(dev)) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
        }
        nvdimm_bus_unlock(dev);
}

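/*
 * nd_region_acquire_lane() - allocate and lock a lane for I/O
 * @nd_region: region providing the lanes
 *
 * A lane serializes access to a shared per-region resource (for example a
 * BTT log slot).  When the region has at least one lane per possible CPU,
 * the CPU id is used directly as the lane and only preemption is disabled.
 * Otherwise lanes are shared between CPUs and a per-lane spinlock, together
 * with a per-CPU nesting count, serializes users that map to the same lane.
 */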
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

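/*
 * PowerPC requires this alignment for memremap_pages(); all other
 * architectures are fine with SZ_2M alignment or smaller.
 */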
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
        unsigned long align;
        u32 remainder;
        int mappings;

        align = MEMREMAP_COMPAT_ALIGN_MAX;
        if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
                align = PAGE_SIZE;

        mappings = max_t(u16, 1, nd_region->ndr_mappings);
        div_u64_rem(align, mappings, &remainder);
        if (remainder)
                align *= mappings;

        return align;
}

static struct lock_class_key nvdimm_region_key;

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc,
                const struct device_type *dev_type, const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % PAGE_SIZE) {
                        dev_err(&nvdimm_bus->dev,
                                "%s: %s mapping%d is not %ld aligned\n",
                                caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;

        }

        nd_region =
                kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
                        GFP_KERNEL);

        if (!nd_region)
                return NULL;

        if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
                nd_region->id = ndr_desc->memregion;
        } else {
                nd_region->id = memregion_alloc(GFP_KERNEL);
                if (nd_region->id < 0)
                        goto err_id;
        }

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                nd_region->mapping[i].position = mapping->position;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        nd_region->target_node = ndr_desc->target_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        dev->of_node = ndr_desc->of_node;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_region->align = default_align(nd_region);
        if (ndr_desc->flush)
                nd_region->flush = ndr_desc->flush;
        else
                nd_region->flush = NULL;

        device_initialize(dev);
        lockdep_set_class(&dev->mutex, &nvdimm_region_key);
        nd_device_register(dev);

        return nd_region;

err_percpu:
        if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
                memregion_free(nd_region->id);
err_id:
        kfree(nd_region);
        return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

void nvdimm_region_delete(struct nd_region *nd_region)
{
        if (nd_region)
                nd_device_unregister(&nd_region->dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_region_delete);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
        int rc = 0;

        if (!nd_region->flush)
                rc = generic_nvdimm_flush(nd_region);
        else {
                if (nd_region->flush(nd_region, bio))
                        rc = -EIO;
        }

        return rc;
}

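/**
 * generic_nvdimm_flush() - flush any posted write queues between the cpu and
 * the dimm media
 * @nd_region: interleave set of dimms to flush
 */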
int generic_nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

        /*
         * The pmem_wmb() is needed to 'sfence' all previous writes such
         * that they are architecturally visible for the platform buffer
         * flush. Note that we've already arranged for pmem writes to
         * avoid the cache via memcpy_flushcache().  The final wmb()
         * ensures ordering for the NVDIMM flush write.
         */
        pmem_wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

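/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: interleave set of dimms to flush
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */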
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        /* Test if an explicit flush function is defined */
        if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
                return 1;

        /* Test if any flush hints for the region are available */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints nor explicit flush,
         * assume platform persistence mechanism like ADR
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
        if (is_nd_volatile(&nd_region->dev))
                return true;

        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
        struct nd_region *nd_region;
        resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
        struct nd_region *nd_region;
        struct conflict_context *ctx = data;
        resource_size_t res_end, region_end, region_start;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region == ctx->nd_region)
                return 0;

        res_end = ctx->start + ctx->size;
        region_start = nd_region->ndr_start;
        region_end = region_start + nd_region->ndr_size;
        if (ctx->start >= region_start && ctx->start < region_end)
                return -EBUSY;
        if (res_end > region_start && res_end <= region_end)
                return -EBUSY;
        return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct conflict_context ctx = {
                .nd_region = nd_region,
                .start = start,
                .size = size,
        };

        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}