// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
    struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

    kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
    struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
    struct nd_region *nd_region = to_nd_region(dev->parent);

    if (nspm->id >= 0)
        ida_simple_remove(&nd_region->ns_ida, nspm->id);
    kfree(nspm->alt_name);
    kfree(nspm->uuid);
    kfree(nspm);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
    uuid_t *uuid1 = data, *uuid2 = NULL;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        uuid2 = nspm->uuid;
    } else if (is_nd_btt(dev)) {
        struct nd_btt *nd_btt = to_nd_btt(dev);

        uuid2 = nd_btt->uuid;
    } else if (is_nd_pfn(dev)) {
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        uuid2 = nd_pfn->uuid;
    }

    if (uuid2 && uuid_equal(uuid1, uuid2))
        return -EBUSY;

    return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
    if (is_nd_region(dev))
        return device_for_each_child(dev, data, is_uuid_busy);
    return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
    struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

    if (!nvdimm_bus)
        return false;
    WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
    if (device_for_each_child(&nvdimm_bus->dev, uuid,
                is_namespace_uuid_busy) != 0)
        return false;
    return true;
}
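
/*
 * Illustrative sketch (assumed caller pattern, cf. uuid_store() below):
 * uniqueness must be checked while holding the nvdimm bus lock, which
 * the WARN_ON_ONCE() above asserts:
 *
 *     nvdimm_bus_lock(dev);
 *     if (!nd_is_uuid_unique(dev, new_uuid))
 *         rc = -EINVAL;
 *     nvdimm_bus_unlock(dev);
 */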

bool pmem_should_map_pages(struct device *dev)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    struct nd_namespace_common *ndns = to_ndns(dev);
    struct nd_namespace_io *nsio;

    if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
        return false;

    if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
        return false;

    if (is_nd_pfn(dev) || is_nd_btt(dev))
        return false;

    if (ndns->force_raw)
        return false;

    nsio = to_nd_namespace_io(dev);
    if (region_intersects(nsio->res.start, resource_size(&nsio->res),
                IORESOURCE_SYSTEM_RAM,
                IORES_DESC_NONE) == REGION_MIXED)
        return false;

    return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
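
/*
 * Summary of the checks above: a namespace gets a struct page memmap
 * only when ZONE_DEVICE is built in, the region advertises
 * ND_REGION_PAGEMAP, no btt/pfn personality or force_raw override is
 * in effect, the range does not partially intersect System RAM, and
 * pmem is mapped write-back on this architecture.
 */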

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
    if (is_namespace_pmem(&ndns->dev)) {
        struct nd_namespace_pmem *nspm;

        nspm = to_nd_namespace_pmem(&ndns->dev);
        if (nspm->lbasize == 0 || nspm->lbasize == 512)
            /* default */;
        else if (nspm->lbasize == 4096)
            return 4096;
        else
            dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
                    nspm->lbasize);
    }

    /*
     * There is no namespace label (is_namespace_io()), or the label
     * indicates the default sector size.
     */
    return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
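
/*
 * For reference, the mapping implemented above:
 *
 *     lbasize 0 or 512  -> 512-byte sectors (default)
 *     lbasize 4096      -> 4096-byte sectors
 *     anything else     -> dev_WARN(), fall back to 512
 */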

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
        char *name)
{
    struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
    const char *suffix = NULL;

    if (ndns->claim && is_nd_btt(ndns->claim))
        suffix = "s";

    if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
        int nsidx = 0;

        if (is_namespace_pmem(&ndns->dev)) {
            struct nd_namespace_pmem *nspm;

            nspm = to_nd_namespace_pmem(&ndns->dev);
            nsidx = nspm->id;
        }

        if (nsidx)
            sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
                    suffix ? suffix : "");
        else
            sprintf(name, "pmem%d%s", nd_region->id,
                    suffix ? suffix : "");
    } else {
        return NULL;
    }

    return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
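
/*
 * Example names produced above for region 1 (illustrative values):
 * namespace id 0 yields "pmem1", id 2 yields "pmem1.2", and a
 * btt-claimed namespace gains the "s" suffix, e.g. "pmem1s" or
 * "pmem1.2s".
 */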

const uuid_t *nd_dev_to_uuid(struct device *dev)
{
    if (!dev)
        return &uuid_null;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        return nspm->uuid;
    } else
        return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);

    return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
        const size_t len)
{
    char *input, *pos, *alt_name, **ns_altname;
    ssize_t rc;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        ns_altname = &nspm->alt_name;
    } else
        return -ENXIO;

    if (dev->driver || to_ndns(dev)->claim)
        return -EBUSY;

    input = kstrndup(buf, len, GFP_KERNEL);
    if (!input)
        return -ENOMEM;

    pos = strim(input);
    if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
        rc = -EINVAL;
        goto out;
    }

    alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
    if (!alt_name) {
        rc = -ENOMEM;
        goto out;
    }
    kfree(*ns_altname);
    *ns_altname = alt_name;
    sprintf(*ns_altname, "%s", pos);
    rc = len;

out:
    kfree(input);
    return rc;
}

static int nd_namespace_label_update(struct nd_region *nd_region,
        struct device *dev)
{
    dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
            "namespace must be idle during label update\n");
    if (dev->driver || to_ndns(dev)->claim)
        return 0;

    /*
     * Only allow label writes that will result in a valid namespace
     * or deletion of an existing namespace.
     */
    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
        resource_size_t size = resource_size(&nspm->nsio.res);

        if (size == 0 && nspm->uuid)
            /* delete allocation */;
        else if (!nspm->uuid)
            return 0;

        return nd_pmem_namespace_label_update(nd_region, nspm, size);
    } else
        return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t len)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    ssize_t rc;

    device_lock(dev);
    nvdimm_bus_lock(dev);
    wait_nvdimm_bus_probe_idle(dev);
    rc = __alt_name_store(dev, buf, len);
    if (rc >= 0)
        rc = nd_namespace_label_update(nd_region, dev);
    dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
    nvdimm_bus_unlock(dev);
    device_unlock(dev);

    return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    char *ns_altname;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        ns_altname = nspm->alt_name;
    } else
        return -ENXIO;

    return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
        struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
        resource_size_t n)
{
    struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    int rc = 0;

    while (n) {
        struct resource *res, *last;

        last = NULL;
        for_each_dpa_resource(ndd, res)
            if (strcmp(res->name, label_id->id) == 0)
                last = res;
        res = last;
        if (!res)
            return 0;

        if (n >= resource_size(res)) {
            n -= resource_size(res);
            nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
            nvdimm_free_dpa(ndd, res);
            /* retry with last resource deleted */
            continue;
        }

        rc = adjust_resource(res, res->start, resource_size(res) - n);
        if (rc == 0)
            res->flags |= DPA_RESOURCE_ADJUSTED;
        nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
        break;
    }

    return rc;
}
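
/*
 * Worked example (hypothetical sizes): with extents of 64M and 32M for
 * a label_id and n = 48M to free, the trailing 32M extent is deleted
 * (n becomes 16M) and the 64M extent is then shrunk to 48M via
 * adjust_resource().
 */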

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
        struct nd_label_id *label_id, resource_size_t n)
{
    int i;

    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        int rc;

        rc = scan_free(nd_region, nd_mapping, label_id, n);
        if (rc)
            return rc;
    }

    return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
        struct nd_region *nd_region, struct nd_mapping *nd_mapping,
        resource_size_t n)
{
    struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    struct resource *res;
    int rc = 0;

    /* first resource allocation for this label-id or dimm */
    res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
    if (!res)
        rc = -EBUSY;

    nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
    return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).
 * If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
        struct nd_label_id *label_id, struct resource *prev,
        struct resource *next, struct resource *exist,
        resource_size_t n, struct resource *valid)
{
    bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
    unsigned long align;

    align = nd_region->align / nd_region->ndr_mappings;
    valid->start = ALIGN(valid->start, align);
    valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

    if (valid->start >= valid->end)
        goto invalid;

    if (is_reserve)
        return;

    /* allocation needs to be contiguous, so this is all or nothing */
    if (resource_size(valid) < n)
        goto invalid;

    /* we've got all the space we need and no existing allocation */
    if (!exist)
        return;

    /* allocation needs to be contiguous with the existing namespace */
    if (valid->start == exist->end + 1
            || valid->end == exist->start - 1)
        return;

 invalid:
    /* truncate @valid size to 0 */
    valid->end = valid->start - 1;
}
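
/*
 * Alignment example (hypothetical sizes): a 16M region align
 * interleaved across two mappings gives an 8M per-dimm align, so a
 * candidate free range of [5M, 21M) is trimmed inward to [8M, 16M)
 * before the size and contiguity checks run.
 */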

enum alloc_loc {
    ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
        struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
        resource_size_t n)
{
    resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
    struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    struct resource *res, *exist = NULL, valid;
    const resource_size_t to_allocate = n;
    int first;

    for_each_dpa_resource(ndd, res)
        if (strcmp(label_id->id, res->name) == 0)
            exist = res;

    valid.start = nd_mapping->start;
    valid.end = mapping_end;
    valid.name = "free space";
 retry:
    first = 0;
    for_each_dpa_resource(ndd, res) {
        struct resource *next = res->sibling, *new_res = NULL;
        resource_size_t allocate, available = 0;
        enum alloc_loc loc = ALLOC_ERR;
        const char *action;
        int rc = 0;

        /* ignore resources outside this nd_mapping */
        if (res->start > mapping_end)
            continue;
        if (res->end < nd_mapping->start)
            continue;

        /* space at the beginning of the mapping */
        if (!first++ && res->start > nd_mapping->start) {
            valid.start = nd_mapping->start;
            valid.end = res->start - 1;
            space_valid(nd_region, ndd, label_id, NULL, next, exist,
                    to_allocate, &valid);
            available = resource_size(&valid);
            if (available)
                loc = ALLOC_BEFORE;
        }

        /* space between allocations */
        if (!loc && next) {
            valid.start = res->start + resource_size(res);
            valid.end = min(mapping_end, next->start - 1);
            space_valid(nd_region, ndd, label_id, res, next, exist,
                    to_allocate, &valid);
            available = resource_size(&valid);
            if (available)
                loc = ALLOC_MID;
        }

        /* space at the end of the mapping */
        if (!loc && !next) {
            valid.start = res->start + resource_size(res);
            valid.end = mapping_end;
            space_valid(nd_region, ndd, label_id, res, next, exist,
                    to_allocate, &valid);
            available = resource_size(&valid);
            if (available)
                loc = ALLOC_AFTER;
        }

        if (!loc || !available)
            continue;
        allocate = min(available, n);
        switch (loc) {
        case ALLOC_BEFORE:
            if (strcmp(res->name, label_id->id) == 0) {
                /* adjust current resource up */
                rc = adjust_resource(res, res->start - allocate,
                        resource_size(res) + allocate);
                action = "cur grow up";
            } else
                action = "allocate";
            break;
        case ALLOC_MID:
            if (strcmp(next->name, label_id->id) == 0) {
                /* adjust next resource up */
                rc = adjust_resource(next, next->start
                        - allocate, resource_size(next)
                        + allocate);
                new_res = next;
                action = "next grow up";
            } else if (strcmp(res->name, label_id->id) == 0) {
                action = "grow down";
            } else
                action = "allocate";
            break;
        case ALLOC_AFTER:
            if (strcmp(res->name, label_id->id) == 0)
                action = "grow down";
            else
                action = "allocate";
            break;
        default:
            return n;
        }

        if (strcmp(action, "allocate") == 0) {
            new_res = nvdimm_allocate_dpa(ndd, label_id,
                    valid.start, allocate);
            if (!new_res)
                rc = -EBUSY;
        } else if (strcmp(action, "grow down") == 0) {
            /* adjust current resource down */
            rc = adjust_resource(res, res->start, resource_size(res)
                    + allocate);
            if (rc == 0)
                res->flags |= DPA_RESOURCE_ADJUSTED;
        }

        if (!new_res)
            new_res = res;

        nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                action, loc, rc);

        if (rc)
            return n;

        n -= allocate;
        if (n) {
            /*
             * Retry scan with newly inserted resources.
             * For example, if we did an ALLOC_BEFORE
             * insertion there may also have been space
             * available for an ALLOC_AFTER insertion, so we
             * need to check this same resource again
             */
            goto retry;
        } else
            return 0;
    }

    if (n == to_allocate)
        return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
    return n;
}
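
/*
 * Note the return convention: scan_allocate() returns the number of
 * bytes that could *not* be allocated, so 0 indicates complete success
 * and callers detect a shortfall with "rem = scan_allocate(...)".
 */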

static int merge_dpa(struct nd_region *nd_region,
        struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
    struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    struct resource *res;

    if (strncmp("pmem", label_id->id, 4) == 0)
        return 0;
 retry:
    for_each_dpa_resource(ndd, res) {
        int rc;
        struct resource *next = res->sibling;
        resource_size_t end = res->start + resource_size(res);

        if (!next || strcmp(res->name, label_id->id) != 0
                || strcmp(next->name, label_id->id) != 0
                || end != next->start)
            continue;
        end += resource_size(next);
        nvdimm_free_dpa(ndd, next);
        rc = adjust_resource(res, res->start, end - res->start);
        nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
        if (rc)
            return rc;
        res->flags |= DPA_RESOURCE_ADJUSTED;
        goto retry;
    }

    return 0;
}
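
/*
 * Merge example (hypothetical extents): adjacent extents [0, 32M) and
 * [32M, 48M) carrying the same label_id collapse into a single
 * [0, 48M) extent, and the scan restarts after every merge.  "pmem"
 * label_ids are skipped, presumably because pmem allocations are
 * already kept contiguous by space_valid().
 */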

int __reserve_free_pmem(struct device *dev, void *data)
{
    struct nvdimm *nvdimm = data;
    struct nd_region *nd_region;
    struct nd_label_id label_id;
    int i;

    if (!is_memory(dev))
        return 0;

    nd_region = to_nd_region(dev);
    if (nd_region->ndr_mappings == 0)
        return 0;

    memset(&label_id, 0, sizeof(label_id));
    strcat(label_id.id, "pmem-reserve");
    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        resource_size_t n, rem = 0;

        if (nd_mapping->nvdimm != nvdimm)
            continue;

        n = nd_pmem_available_dpa(nd_region, nd_mapping);
        if (n == 0)
            return 0;
        rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
        dev_WARN_ONCE(&nd_region->dev, rem,
                "pmem reserve underrun: %#llx of %#llx bytes\n",
                (unsigned long long) n - rem,
                (unsigned long long) n);
        return rem ? -ENXIO : 0;
    }

    return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
        struct nd_mapping *nd_mapping)
{
    struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    struct resource *res, *_res;

    for_each_dpa_resource_safe(ndd, res, _res)
        if (strcmp(res->name, "pmem-reserve") == 0)
            nvdimm_free_dpa(ndd, res);
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
        struct nd_label_id *label_id, resource_size_t n)
{
    int i;

    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        resource_size_t rem = n;
        int rc;

        rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
        dev_WARN_ONCE(&nd_region->dev, rem,
                "allocation underrun: %#llx of %#llx bytes\n",
                (unsigned long long) n - rem,
                (unsigned long long) n);
        if (rem)
            return -ENXIO;

        rc = merge_dpa(nd_region, nd_mapping, label_id);
        if (rc)
            return rc;
    }

    return 0;
}

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
        struct nd_namespace_pmem *nspm, resource_size_t size)
{
    struct resource *res = &nspm->nsio.res;
    resource_size_t offset = 0;

    if (size && !nspm->uuid) {
        WARN_ON_ONCE(1);
        size = 0;
    }

    if (size && nspm->uuid) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;

        if (!ndd) {
            size = 0;
            goto out;
        }

        nd_label_gen_id(&label_id, nspm->uuid, 0);

        /* calculate a spa offset from the dpa allocation offset */
        for_each_dpa_resource(ndd, res)
            if (strcmp(res->name, label_id.id) == 0) {
                offset = (res->start - nd_mapping->start)
                    * nd_region->ndr_mappings;
                goto out;
            }

        WARN_ON_ONCE(1);
        size = 0;
    }

 out:
    res->start = nd_region->ndr_start + offset;
    res->end = res->start + size - 1;
}
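
/*
 * Interleave arithmetic example (hypothetical sizes): with four
 * mappings and a dpa allocation starting 16M into its dimm, the
 * system-physical-address offset is 16M * 4 = 64M from ndr_start,
 * since each dpa byte is striped across all mappings in the set.
 */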

static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
             const char *where)
{
    if (!uuid) {
        dev_dbg(dev, "%s: uuid not set\n", where);
        return true;
    }
    return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
    resource_size_t allocated = 0, available = 0;
    struct nd_region *nd_region = to_nd_region(dev->parent);
    struct nd_namespace_common *ndns = to_ndns(dev);
    struct nd_mapping *nd_mapping;
    struct nvdimm_drvdata *ndd;
    struct nd_label_id label_id;
    u32 flags = 0, remainder;
    int rc, i, id = -1;
    uuid_t *uuid = NULL;

    if (dev->driver || ndns->claim)
        return -EBUSY;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        uuid = nspm->uuid;
        id = nspm->id;
    }

    /*
     * We need a uuid for the allocation-label and dimm(s) on which
     * to store the label.
     */
    if (uuid_not_set(uuid, dev, __func__))
        return -ENXIO;
    if (nd_region->ndr_mappings == 0) {
        dev_dbg(dev, "not associated with dimm(s)\n");
        return -ENXIO;
    }

    div_u64_rem(val, nd_region->align, &remainder);
    if (remainder) {
        dev_dbg(dev, "%llu is not %ldK aligned\n", val,
                nd_region->align / SZ_1K);
        return -EINVAL;
    }

    nd_label_gen_id(&label_id, uuid, flags);
    for (i = 0; i < nd_region->ndr_mappings; i++) {
        nd_mapping = &nd_region->mapping[i];
        ndd = to_ndd(nd_mapping);

        /*
         * All dimms in an interleave set need to be enabled
         * for the size to be changed.
         */
        if (!ndd)
            return -ENXIO;

        allocated += nvdimm_allocated_dpa(ndd, &label_id);
    }
    available = nd_region_allocatable_dpa(nd_region);

    if (val > available + allocated)
        return -ENOSPC;

    if (val == allocated)
        return 0;

    val = div_u64(val, nd_region->ndr_mappings);
    allocated = div_u64(allocated, nd_region->ndr_mappings);
    if (val < allocated)
        rc = shrink_dpa_allocation(nd_region, &label_id,
                allocated - val);
    else
        rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

    if (rc)
        return rc;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        nd_namespace_pmem_set_resource(nd_region, nspm,
                val * nd_region->ndr_mappings);
    }

    /*
     * Try to delete the namespace if we deleted all of its
     * allocation, it is not the seed or 0th device for the
     * region, and it is not actively claimed by a btt, pfn, or dax
     * instance.
     */
    if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
        nd_device_unregister(dev, ND_ASYNC);

    return rc;
}
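
/*
 * Sizing example (hypothetical sizes): a request of val = 2G against a
 * 4-way interleaved region becomes 512M per dimm after the div_u64()
 * above; shrink_dpa_allocation()/grow_dpa_allocation() then operate on
 * the per-dimm delta from the current allocation.
 */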

static ssize_t size_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t len)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    unsigned long long val;
    uuid_t **uuid = NULL;
    int rc;

    rc = kstrtoull(buf, 0, &val);
    if (rc)
        return rc;

    device_lock(dev);
    nvdimm_bus_lock(dev);
    wait_nvdimm_bus_probe_idle(dev);
    rc = __size_store(dev, val);
    if (rc >= 0)
        rc = nd_namespace_label_update(nd_region, dev);

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        uuid = &nspm->uuid;
    }

    if (rc == 0 && val == 0 && uuid) {
        /* setting size zero == 'delete namespace' */
        kfree(*uuid);
        *uuid = NULL;
    }

    dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

    nvdimm_bus_unlock(dev);
    device_unlock(dev);

    return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
    struct device *dev = &ndns->dev;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        return resource_size(&nspm->nsio.res);
    } else if (is_namespace_io(dev)) {
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        return resource_size(&nsio->res);
    } else
        WARN_ONCE(1, "unknown namespace type\n");
    return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
    resource_size_t size;

    nvdimm_bus_lock(&ndns->dev);
    size = __nvdimm_namespace_capacity(ndns);
    nvdimm_bus_unlock(&ndns->dev);

    return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
    int i;
    bool locked = false;
    struct device *dev = &ndns->dev;
    struct nd_region *nd_region = to_nd_region(dev->parent);

    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
            dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
            locked = true;
        }
    }
    return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    return sprintf(buf, "%llu\n", (unsigned long long)
            nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static uuid_t *namespace_to_uuid(struct device *dev)
{
    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        return nspm->uuid;
    }
    return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    uuid_t *uuid = namespace_to_uuid(dev);

    if (IS_ERR(uuid))
        return PTR_ERR(uuid);
    if (uuid)
        return sprintf(buf, "%pUb\n", uuid);
    return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                 struct device *dev, uuid_t *new_uuid,
                 uuid_t **old_uuid)
{
    struct nd_label_id old_label_id;
    struct nd_label_id new_label_id;
    int i;

    if (!nd_is_uuid_unique(dev, new_uuid))
        return -EINVAL;

    if (*old_uuid == NULL)
        goto out;

    /*
     * If we've already written a label with this uuid, then it's
     * too late to rename because we can't reliably update the uuid
     * without losing the old namespace.  Userspace must delete this
     * namespace to abandon the old uuid.
     */
    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];

        /*
         * This check by itself is sufficient because old_uuid
         * would be NULL above if this uuid did not exist in the
         * currently written set.
         *
         * FIXME: can we delete uuid with zero dpa allocated?
         */
        if (list_empty(&nd_mapping->labels))
            return -EBUSY;
    }

    nd_label_gen_id(&old_label_id, *old_uuid, 0);
    nd_label_gen_id(&new_label_id, new_uuid, 0);
    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_ent *label_ent;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
            if (strcmp(res->name, old_label_id.id) == 0)
                sprintf((void *) res->name, "%s",
                        new_label_id.id);

        mutex_lock(&nd_mapping->lock);
        list_for_each_entry(label_ent, &nd_mapping->labels, list) {
            struct nd_namespace_label *nd_label = label_ent->label;
            struct nd_label_id label_id;
            uuid_t uuid;

            if (!nd_label)
                continue;
            nsl_get_uuid(ndd, nd_label, &uuid);
            nd_label_gen_id(&label_id, &uuid,
                    nsl_get_flags(ndd, nd_label));
            if (strcmp(old_label_id.id, label_id.id) == 0)
                set_bit(ND_LABEL_REAP, &label_ent->flags);
        }
        mutex_unlock(&nd_mapping->lock);
    }
    kfree(*old_uuid);
 out:
    *old_uuid = new_uuid;
    return 0;
}

static ssize_t uuid_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t len)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    uuid_t *uuid = NULL;
    uuid_t **ns_uuid;
    ssize_t rc = 0;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        ns_uuid = &nspm->uuid;
    } else
        return -ENXIO;

    device_lock(dev);
    nvdimm_bus_lock(dev);
    wait_nvdimm_bus_probe_idle(dev);
    if (to_ndns(dev)->claim)
        rc = -EBUSY;
    if (rc >= 0)
        rc = nd_uuid_store(dev, &uuid, buf, len);
    if (rc >= 0)
        rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
    if (rc >= 0)
        rc = nd_namespace_label_update(nd_region, dev);
    else
        kfree(uuid);
    dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
            buf[len - 1] == '\n' ? "" : "\n");
    nvdimm_bus_unlock(dev);
    device_unlock(dev);

    return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct resource *res;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        res = &nspm->nsio.res;
    } else if (is_namespace_io(dev)) {
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        res = &nsio->res;
    } else
        return -ENXIO;

    /* no address to convey if the namespace has no allocation */
    if (resource_size(res) == 0)
        return -ENXIO;
    return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        return nd_size_select_show(nspm->lbasize,
                pmem_lbasize_supported, buf);
    }
    return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t len)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    const unsigned long *supported;
    unsigned long *lbasize;
    ssize_t rc = 0;

    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        lbasize = &nspm->lbasize;
        supported = pmem_lbasize_supported;
    } else
        return -ENXIO;

    device_lock(dev);
    nvdimm_bus_lock(dev);
    if (to_ndns(dev)->claim)
        rc = -EBUSY;
    if (rc >= 0)
        rc = nd_size_select_store(dev, buf, lbasize, supported);
    if (rc >= 0)
        rc = nd_namespace_label_update(nd_region, dev);
    dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
            buf, buf[len - 1] == '\n' ? "" : "\n");
    nvdimm_bus_unlock(dev);
    device_unlock(dev);

    return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    struct nd_label_id label_id;
    uuid_t *uuid = NULL;
    int count = 0, i;
    u32 flags = 0;

    nvdimm_bus_lock(dev);
    if (is_namespace_pmem(dev)) {
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        uuid = nspm->uuid;
        flags = 0;
    }

    if (!uuid)
        goto out;

    nd_label_gen_id(&label_id, uuid, flags);
    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        for_each_dpa_resource(ndd, res)
            if (strcmp(res->name, label_id.id) == 0)
                count++;
    }
 out:
    nvdimm_bus_unlock(dev);

    return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static int btt_claim_class(struct device *dev)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    int i, loop_bitmask = 0;

    for (i = 0; i < nd_region->ndr_mappings; i++) {
        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_namespace_index *nsindex;

        /*
         * If any of the DIMMs do not support labels the only
         * possible BTT format is v1.
         */
        if (!ndd) {
            loop_bitmask = 0;
            break;
        }

        nsindex = to_namespace_index(ndd, ndd->ns_current);
        if (nsindex == NULL)
            loop_bitmask |= 1;
        else {
            /* check whether existing labels are v1.1 or v1.2 */
            if (__le16_to_cpu(nsindex->major) == 1
                    && __le16_to_cpu(nsindex->minor) == 1)
                loop_bitmask |= 2;
            else
                loop_bitmask |= 4;
        }
    }
    /*
     * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
     * block is found, a v1.1 label for any mapping will set bit 1, and a
     * v1.2 label will set bit 2.
     *
     * At the end of the loop, at most one of the three bits must be set.
     * If multiple bits were set, it means the different mappings disagree
     * about their labels, and this must be cleaned up first.
     *
     * If all the label index blocks are found to agree, nsindex of NULL
     * implies labels haven't been initialized yet, and when they are,
     * they will be of the 1.2 format, so we can assume BTT2.0.
     *
     * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
     * found, we enforce BTT2.0.
     *
     * If the loop was never entered, default to BTT1.1 (legacy namespaces)
     */
    switch (loop_bitmask) {
    case 0:
    case 2:
        return NVDIMM_CCLASS_BTT;
    case 1:
    case 4:
        return NVDIMM_CCLASS_BTT2;
    default:
        return -ENXIO;
    }
}
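
/*
 * loop_bitmask decoding, for reference:
 *
 *     0 (no labels / loop not entered) -> NVDIMM_CCLASS_BTT  (v1.1)
 *     1 (index blocks uninitialized)   -> NVDIMM_CCLASS_BTT2 (v1.2)
 *     2 (v1.1 labels found)            -> NVDIMM_CCLASS_BTT
 *     4 (v1.2 labels found)            -> NVDIMM_CCLASS_BTT2
 *     multiple bits set                -> -ENXIO (mappings disagree)
 */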

static ssize_t holder_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nd_namespace_common *ndns = to_ndns(dev);
    ssize_t rc;

    device_lock(dev);
    rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
    device_unlock(dev);

    return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
    struct nd_namespace_common *ndns = to_ndns(dev);

    if (dev->driver || ndns->claim)
        return -EBUSY;

    if (sysfs_streq(buf, "btt")) {
        int rc = btt_claim_class(dev);

        if (rc < NVDIMM_CCLASS_NONE)
            return rc;
        ndns->claim_class = rc;
    } else if (sysfs_streq(buf, "pfn"))
        ndns->claim_class = NVDIMM_CCLASS_PFN;
    else if (sysfs_streq(buf, "dax"))
        ndns->claim_class = NVDIMM_CCLASS_DAX;
    else if (sysfs_streq(buf, ""))
        ndns->claim_class = NVDIMM_CCLASS_NONE;
    else
        return -EINVAL;

    return 0;
}

static ssize_t holder_class_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t len)
{
    struct nd_region *nd_region = to_nd_region(dev->parent);
    int rc;

    device_lock(dev);
    nvdimm_bus_lock(dev);
    wait_nvdimm_bus_probe_idle(dev);
    rc = __holder_class_store(dev, buf);
    if (rc >= 0)
        rc = nd_namespace_label_update(nd_region, dev);
    dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
    nvdimm_bus_unlock(dev);
    device_unlock(dev);

    return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct nd_namespace_common *ndns = to_ndns(dev);
    ssize_t rc;

    device_lock(dev);
    if (ndns->claim_class == NVDIMM_CCLASS_NONE)
        rc = sprintf(buf, "\n");
    else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
            (ndns->claim_class == NVDIMM_CCLASS_BTT2))
        rc = sprintf(buf, "btt\n");
    else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
        rc = sprintf(buf, "pfn\n");
    else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
        rc = sprintf(buf, "dax\n");
    else
        rc = sprintf(buf, "<unknown>\n");
    device_unlock(dev);

    return rc;
}
static DEVICE_ATTR_RW(holder_class);
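
/*
 * Illustrative shell usage (the device path is an assumption, following
 * the usual /sys/bus/nd layout):
 *
 *     # echo btt > /sys/bus/nd/devices/namespace0.0/holder_class
 *     # cat /sys/bus/nd/devices/namespace0.0/holder_class
 *     btt
 */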
1317 
1318 static ssize_t mode_show(struct device *dev,
1319         struct device_attribute *attr, char *buf)
1320 {
1321     struct nd_namespace_common *ndns = to_ndns(dev);
1322     struct device *claim;
1323     char *mode;
1324     ssize_t rc;
1325 
1326     device_lock(dev);
1327     claim = ndns->claim;
1328     if (claim && is_nd_btt(claim))
1329         mode = "safe";
1330     else if (claim && is_nd_pfn(claim))
1331         mode = "memory";
1332     else if (claim && is_nd_dax(claim))
1333         mode = "dax";
1334     else if (!claim && pmem_should_map_pages(dev))
1335         mode = "memory";
1336     else
1337         mode = "raw";
1338     rc = sprintf(buf, "%s\n", mode);
1339     device_unlock(dev);
1340 
1341     return rc;
1342 }
1343 static DEVICE_ATTR_RO(mode);
1344 
1345 static ssize_t force_raw_store(struct device *dev,
1346         struct device_attribute *attr, const char *buf, size_t len)
1347 {
1348     bool force_raw;
1349     int rc = strtobool(buf, &force_raw);
1350 
1351     if (rc)
1352         return rc;
1353 
1354     to_ndns(dev)->force_raw = force_raw;
1355     return len;
1356 }
1357 
1358 static ssize_t force_raw_show(struct device *dev,
1359         struct device_attribute *attr, char *buf)
1360 {
1361     return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1362 }
1363 static DEVICE_ATTR_RW(force_raw);
1364 
1365 static struct attribute *nd_namespace_attributes[] = {
1366     &dev_attr_nstype.attr,
1367     &dev_attr_size.attr,
1368     &dev_attr_mode.attr,
1369     &dev_attr_uuid.attr,
1370     &dev_attr_holder.attr,
1371     &dev_attr_resource.attr,
1372     &dev_attr_alt_name.attr,
1373     &dev_attr_force_raw.attr,
1374     &dev_attr_sector_size.attr,
1375     &dev_attr_dpa_extents.attr,
1376     &dev_attr_holder_class.attr,
1377     NULL,
1378 };
1379 
1380 static umode_t namespace_visible(struct kobject *kobj,
1381         struct attribute *a, int n)
1382 {
1383     struct device *dev = container_of(kobj, struct device, kobj);
1384 
1385     if (is_namespace_pmem(dev)) {
1386         if (a == &dev_attr_size.attr)
1387             return 0644;
1388 
1389         return a->mode;
1390     }
1391 
1392     /* base is_namespace_io() attributes */
1393     if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
1394         a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
1395         a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
1396         a == &dev_attr_resource.attr)
1397         return a->mode;
1398 
1399     return 0;
1400 }
1401 
1402 static struct attribute_group nd_namespace_attribute_group = {
1403     .attrs = nd_namespace_attributes,
1404     .is_visible = namespace_visible,
1405 };
1406 
1407 static const struct attribute_group *nd_namespace_attribute_groups[] = {
1408     &nd_device_attribute_group,
1409     &nd_namespace_attribute_group,
1410     &nd_numa_attribute_group,
1411     NULL,
1412 };
1413 
1414 static const struct device_type namespace_io_device_type = {
1415     .name = "nd_namespace_io",
1416     .release = namespace_io_release,
1417     .groups = nd_namespace_attribute_groups,
1418 };
1419 
1420 static const struct device_type namespace_pmem_device_type = {
1421     .name = "nd_namespace_pmem",
1422     .release = namespace_pmem_release,
1423     .groups = nd_namespace_attribute_groups,
1424 };
1425 
1426 static bool is_namespace_pmem(const struct device *dev)
1427 {
1428     return dev ? dev->type == &namespace_pmem_device_type : false;
1429 }
1430 
1431 static bool is_namespace_io(const struct device *dev)
1432 {
1433     return dev ? dev->type == &namespace_io_device_type : false;
1434 }
1435 
1436 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1437 {
1438     struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1439     struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
1440     struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
1441     struct nd_namespace_common *ndns = NULL;
1442     resource_size_t size;
1443 
1444     if (nd_btt || nd_pfn || nd_dax) {
1445         if (nd_btt)
1446             ndns = nd_btt->ndns;
1447         else if (nd_pfn)
1448             ndns = nd_pfn->ndns;
1449         else if (nd_dax)
1450             ndns = nd_dax->nd_pfn.ndns;
1451 
1452         if (!ndns)
1453             return ERR_PTR(-ENODEV);
1454 
1455         /*
1456          * Flush any in-progess probes / removals in the driver
1457          * for the raw personality of this namespace.
1458          */
1459         device_lock(&ndns->dev);
1460         device_unlock(&ndns->dev);
1461         if (ndns->dev.driver) {
1462             dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1463                     dev_name(dev));
1464             return ERR_PTR(-EBUSY);
1465         }
1466         if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
1467                     "host (%s) vs claim (%s) mismatch\n",
1468                     dev_name(dev),
1469                     dev_name(ndns->claim)))
1470             return ERR_PTR(-ENXIO);
1471     } else {
1472         ndns = to_ndns(dev);
1473         if (ndns->claim) {
1474             dev_dbg(dev, "claimed by %s, failing probe\n",
1475                 dev_name(ndns->claim));
1476 
1477             return ERR_PTR(-ENXIO);
1478         }
1479     }
1480 
1481     if (nvdimm_namespace_locked(ndns))
1482         return ERR_PTR(-EACCES);
1483 
1484     size = nvdimm_namespace_capacity(ndns);
1485     if (size < ND_MIN_NAMESPACE_SIZE) {
1486         dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
1487                 &size, ND_MIN_NAMESPACE_SIZE);
1488         return ERR_PTR(-ENODEV);
1489     }
1490 
1491     /*
1492      * Note, alignment validation for fsdax and devdax mode
1493      * namespaces happens in nd_pfn_validate() where infoblock
1494      * padding parameters can be applied.
1495      */
1496     if (pmem_should_map_pages(dev)) {
1497         struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
1498         struct resource *res = &nsio->res;
1499 
1500         if (!IS_ALIGNED(res->start | (res->end + 1),
1501                     memremap_compat_align())) {
1502             dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
1503             return ERR_PTR(-EOPNOTSUPP);
1504         }
1505     }
1506 
1507     if (is_namespace_pmem(&ndns->dev)) {
1508         struct nd_namespace_pmem *nspm;
1509 
1510         nspm = to_nd_namespace_pmem(&ndns->dev);
1511         if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
1512             return ERR_PTR(-ENODEV);
1513     }
1514 
1515     return ndns;
1516 }
1517 EXPORT_SYMBOL(nvdimm_namespace_common_probe);
1518 
1519 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
1520         resource_size_t size)
1521 {
1522     return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
1523 }
1524 EXPORT_SYMBOL_GPL(devm_namespace_enable);
1525 
1526 void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
1527 {
1528     devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
1529 }
1530 EXPORT_SYMBOL_GPL(devm_namespace_disable);
1531 
1532 static struct device **create_namespace_io(struct nd_region *nd_region)
1533 {
1534     struct nd_namespace_io *nsio;
1535     struct device *dev, **devs;
1536     struct resource *res;
1537 
1538     nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1539     if (!nsio)
1540         return NULL;
1541 
1542     devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1543     if (!devs) {
1544         kfree(nsio);
1545         return NULL;
1546     }
1547 
1548     dev = &nsio->common.dev;
1549     dev->type = &namespace_io_device_type;
1550     dev->parent = &nd_region->dev;
1551     res = &nsio->res;
1552     res->name = dev_name(&nd_region->dev);
1553     res->flags = IORESOURCE_MEM;
1554     res->start = nd_region->ndr_start;
1555     res->end = res->start + nd_region->ndr_size - 1;
1556 
1557     devs[0] = dev;
1558     return devs;
1559 }
1560 
1561 static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
1562                 u64 cookie, u16 pos)
1563 {
1564     struct nd_namespace_label *found = NULL;
1565     int i;
1566 
1567     for (i = 0; i < nd_region->ndr_mappings; i++) {
1568         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1569         struct nd_interleave_set *nd_set = nd_region->nd_set;
1570         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1571         struct nd_label_ent *label_ent;
1572         bool found_uuid = false;
1573 
1574         list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1575             struct nd_namespace_label *nd_label = label_ent->label;
1576             u16 position;
1577 
1578             if (!nd_label)
1579                 continue;
1580             position = nsl_get_position(ndd, nd_label);
1581 
1582             if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
1583                 continue;
1584 
1585             if (!nsl_uuid_equal(ndd, nd_label, uuid))
1586                 continue;
1587 
1588             if (!nsl_validate_type_guid(ndd, nd_label,
1589                             &nd_set->type_guid))
1590                 continue;
1591 
1592             if (found_uuid) {
1593                 dev_dbg(ndd->dev, "duplicate entry for uuid\n");
1594                 return false;
1595             }
1596             found_uuid = true;
1597             if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
1598                 continue;
1599             if (position != pos)
1600                 continue;
1601             found = nd_label;
1602             break;
1603         }
1604         if (found)
1605             break;
1606     }
1607     return found != NULL;
1608 }
1609 
1610 static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
1611 {
1612     int i;
1613 
1614     if (!pmem_id)
1615         return -ENODEV;
1616 
1617     for (i = 0; i < nd_region->ndr_mappings; i++) {
1618         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1619         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1620         struct nd_namespace_label *nd_label = NULL;
1621         u64 hw_start, hw_end, pmem_start, pmem_end;
1622         struct nd_label_ent *label_ent;
1623 
1624         lockdep_assert_held(&nd_mapping->lock);
1625         list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1626             nd_label = label_ent->label;
1627             if (!nd_label)
1628                 continue;
1629             if (nsl_uuid_equal(ndd, nd_label, pmem_id))
1630                 break;
1631             nd_label = NULL;
1632         }
1633 
1634         if (!nd_label) {
1635             WARN_ON(1);
1636             return -EINVAL;
1637         }
1638 
1639         /*
1640          * Check that this label is compliant with the dpa
1641          * range published in NFIT
1642          */
1643         hw_start = nd_mapping->start;
1644         hw_end = hw_start + nd_mapping->size;
1645         pmem_start = nsl_get_dpa(ndd, nd_label);
1646         pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
1647         if (pmem_start >= hw_start && pmem_start < hw_end
1648                 && pmem_end <= hw_end && pmem_end > hw_start)
1649             /* pass */;
1650         else {
1651             dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1652                 dev_name(ndd->dev),
1653                 nsl_uuid_raw(ndd, nd_label));
1654             return -EINVAL;
1655         }
1656 
1657         /* move recently validated label to the front of the list */
1658         list_move(&label_ent->list, &nd_mapping->labels);
1659     }
1660     return 0;
1661 }
1662 
1663 /**
1664  * create_namespace_pmem - validate interleave set labelling, retrieve label0
1665  * @nd_region: region with mappings to validate
1666  * @nspm: target namespace to create
1667  * @nd_label: target pmem namespace label to evaluate
1668  */
1669 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1670                         struct nd_mapping *nd_mapping,
1671                         struct nd_namespace_label *nd_label)
1672 {
1673     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1674     struct nd_namespace_index *nsindex =
1675         to_namespace_index(ndd, ndd->ns_current);
1676     u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1677     u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1678     struct nd_label_ent *label_ent;
1679     struct nd_namespace_pmem *nspm;
1680     resource_size_t size = 0;
1681     struct resource *res;
1682     struct device *dev;
1683     uuid_t uuid;
1684     int rc = 0;
1685     u16 i;
1686 
1687     if (cookie == 0) {
1688         dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1689         return ERR_PTR(-ENXIO);
1690     }
1691 
1692     if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
1693         dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1694             nsl_uuid_raw(ndd, nd_label));
1695         if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
1696             return ERR_PTR(-EAGAIN);
1697 
1698         dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1699             nsl_uuid_raw(ndd, nd_label));
1700     }
1701 
1702     nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1703     if (!nspm)
1704         return ERR_PTR(-ENOMEM);
1705 
1706     nspm->id = -1;
1707     dev = &nspm->nsio.common.dev;
1708     dev->type = &namespace_pmem_device_type;
1709     dev->parent = &nd_region->dev;
1710     res = &nspm->nsio.res;
1711     res->name = dev_name(&nd_region->dev);
1712     res->flags = IORESOURCE_MEM;
1713 
1714     for (i = 0; i < nd_region->ndr_mappings; i++) {
1715         nsl_get_uuid(ndd, nd_label, &uuid);
1716         if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
1717             continue;
1718         if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
1719             continue;
1720         break;
1721     }
1722 
1723     if (i < nd_region->ndr_mappings) {
1724         struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1725 
1726         /*
1727          * Give up if we don't find an instance of a uuid at each
1728          * position (from 0 to nd_region->ndr_mappings - 1), or if we
1729          * find a dimm with two instances of the same uuid.
1730          */
1731         dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1732             nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
1733         rc = -EINVAL;
1734         goto err;
1735     }
1736 
1737     /*
1738      * Fix up each mapping's 'labels' to have the validated pmem label for
1739      * that position at labels[0], and NULL at labels[1].  In the process,
1740      * check that the namespace aligns with the interleave set.
1741      */
1742     nsl_get_uuid(ndd, nd_label, &uuid);
1743     rc = select_pmem_id(nd_region, &uuid);
1744     if (rc)
1745         goto err;
1746 
1747     /* Calculate total size and populate namespace properties from label0 */
1748     for (i = 0; i < nd_region->ndr_mappings; i++) {
1749         struct nd_namespace_label *label0;
1750         struct nvdimm_drvdata *ndd;
1751 
1752         nd_mapping = &nd_region->mapping[i];
1753         label_ent = list_first_entry_or_null(&nd_mapping->labels,
1754                 typeof(*label_ent), list);
1755         label0 = label_ent ? label_ent->label : NULL;
1756 
1757         if (!label0) {
1758             WARN_ON(1);
1759             continue;
1760         }
1761 
1762         ndd = to_ndd(nd_mapping);
1763         size += nsl_get_rawsize(ndd, label0);
1764         if (nsl_get_position(ndd, label0) != 0)
1765             continue;
1766         WARN_ON(nspm->alt_name || nspm->uuid);
1767         nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
1768                      NSLABEL_NAME_LEN, GFP_KERNEL);
1769         nsl_get_uuid(ndd, label0, &uuid);
1770         nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
1771         nspm->lbasize = nsl_get_lbasize(ndd, label0);
1772         nspm->nsio.common.claim_class =
1773             nsl_get_claim_class(ndd, label0);
1774     }
1775 
1776     if (!nspm->alt_name || !nspm->uuid) {
1777         rc = -ENOMEM;
1778         goto err;
1779     }
1780 
1781     nd_namespace_pmem_set_resource(nd_region, nspm, size);
1782 
1783     return dev;
1784  err:
1785     namespace_pmem_release(dev);
1786     switch (rc) {
1787     case -EINVAL:
1788         dev_dbg(&nd_region->dev, "invalid label(s)\n");
1789         break;
1790     case -ENODEV:
1791         dev_dbg(&nd_region->dev, "label not found\n");
1792         break;
1793     default:
1794         dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
1795         break;
1796     }
1797     return ERR_PTR(rc);
1798 }
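
/*
 * Return contract, as consumed by scan_labels(): a valid device on
 * success; ERR_PTR(-EAGAIN) when the label fails both cookie checks
 * (the caller skips it and keeps scanning); ERR_PTR(-ENODEV) when no
 * label is found (the caller falls through to seed creation); any
 * other ERR_PTR() aborts the scan.
 */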
1799 
1800 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
1801 {
1802     struct nd_namespace_pmem *nspm;
1803     struct resource *res;
1804     struct device *dev;
1805 
1806     if (!is_memory(&nd_region->dev))
1807         return NULL;
1808 
1809     nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1810     if (!nspm)
1811         return NULL;
1812 
1813     dev = &nspm->nsio.common.dev;
1814     dev->type = &namespace_pmem_device_type;
1815     dev->parent = &nd_region->dev;
1816     res = &nspm->nsio.res;
1817     res->name = dev_name(&nd_region->dev);
1818     res->flags = IORESOURCE_MEM;
1819 
1820     nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1821     if (nspm->id < 0) {
1822         kfree(nspm);
1823         return NULL;
1824     }
1825     dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
1826     nd_namespace_pmem_set_resource(nd_region, nspm, 0);
1827 
1828     return dev;
1829 }
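
/*
 * This allocates the region's "seed" namespace: a zero-sized pmem
 * namespace, named namespace<region-id>.<ns-id> (e.g. "namespace0.0"),
 * that userspace can grow by writing its size attribute.  Once the
 * seed is claimed, the region advances to a fresh one via
 * nd_region_create_ns_seed() below.
 */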
1830 
1831 static struct lock_class_key nvdimm_namespace_key;
1832 
1833 void nd_region_create_ns_seed(struct nd_region *nd_region)
1834 {
1835     WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1836 
1837     if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
1838         return;
1839 
1840     nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
1841 
1842     /*
1843      * Seed creation failures are not fatal; provisioning is simply
1844      * disabled until memory becomes available
1845      */
1846     if (!nd_region->ns_seed)
1847         dev_err(&nd_region->dev, "failed to create namespace\n");
1848     else {
1849         device_initialize(nd_region->ns_seed);
1850         lockdep_set_class(&nd_region->ns_seed->mutex,
1851                   &nvdimm_namespace_key);
1852         nd_device_register(nd_region->ns_seed);
1853     }
1854 }
1855 
1856 void nd_region_create_dax_seed(struct nd_region *nd_region)
1857 {
1858     WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1859     nd_region->dax_seed = nd_dax_create(nd_region);
1860     /*
1861      * Seed creation failures are not fatal; provisioning is simply
1862      * disabled until memory becomes available
1863      */
1864     if (!nd_region->dax_seed)
1865         dev_err(&nd_region->dev, "failed to create dax namespace\n");
1866 }
1867 
1868 void nd_region_create_pfn_seed(struct nd_region *nd_region)
1869 {
1870     WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1871     nd_region->pfn_seed = nd_pfn_create(nd_region);
1872     /*
1873      * Seed creation failures are not fatal; provisioning is simply
1874      * disabled until memory becomes available
1875      */
1876     if (!nd_region->pfn_seed)
1877         dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1878 }
1879 
1880 void nd_region_create_btt_seed(struct nd_region *nd_region)
1881 {
1882     WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1883     nd_region->btt_seed = nd_btt_create(nd_region);
1884     /*
1885      * Seed creation failures are not fatal; provisioning is simply
1886      * disabled until memory becomes available
1887      */
1888     if (!nd_region->btt_seed)
1889         dev_err(&nd_region->dev, "failed to create btt namespace\n");
1890 }
1891 
1892 static int add_namespace_resource(struct nd_region *nd_region,
1893         struct nd_namespace_label *nd_label, struct device **devs,
1894         int count)
1895 {
1896     struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1897     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1898     int i;
1899 
1900     for (i = 0; i < count; i++) {
1901         uuid_t *uuid = namespace_to_uuid(devs[i]);
1902 
1903         if (IS_ERR(uuid)) {
1904             WARN_ON(1);
1905             continue;
1906         }
1907 
1908         if (!nsl_uuid_equal(ndd, nd_label, uuid))
1909             continue;
1910         dev_err(&nd_region->dev,
1911             "error: conflicting extents for uuid: %pUb\n", uuid);
1912         return -ENXIO;
1913     }
1914 
1915     return i;
1916 }
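
/*
 * Return semantics: a negative errno when @nd_label's uuid collides
 * with an already-discovered namespace, otherwise @count, which tells
 * scan_labels() to instantiate a new namespace for this label.  The
 * "i < count" check in the caller appears to be a leftover from the
 * since-removed blk-namespace support, where a matching uuid was a
 * legitimate request to extend an existing namespace.
 */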
1917 
1918 static int cmp_dpa(const void *a, const void *b)
1919 {
1920     const struct device *dev_a = *(const struct device **) a;
1921     const struct device *dev_b = *(const struct device **) b;
1922     struct nd_namespace_pmem *nspm_a, *nspm_b;
1923 
1924     if (is_namespace_io(dev_a))
1925         return 0;
1926 
1927     nspm_a = to_nd_namespace_pmem(dev_a);
1928     nspm_b = to_nd_namespace_pmem(dev_b);
1929 
1930     return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
1931             sizeof(resource_size_t));
1932 }
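
/*
 * Comparator for sort()ing the discovered namespaces by start address.
 * Note that memcmp() compares the raw bytes of resource_size_t, so on
 * a little-endian machine the resulting order is byte-wise rather than
 * strictly numeric.
 */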
1933 
1934 static struct device **scan_labels(struct nd_region *nd_region)
1935 {
1936     int i, count = 0;
1937     struct device *dev, **devs = NULL;
1938     struct nd_label_ent *label_ent, *e;
1939     struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1940     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1941     resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
1942 
1943     /* "safe" because create_namespace_pmem() might list_move() label_ent */
1944     list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1945         struct nd_namespace_label *nd_label = label_ent->label;
1946         struct device **__devs;
1947 
1948         if (!nd_label)
1949             continue;
1950 
1951         /* skip labels that describe extents outside of the region */
1952         if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
1953             nsl_get_dpa(ndd, nd_label) > map_end)
1954             continue;
1955 
1956         i = add_namespace_resource(nd_region, nd_label, devs, count);
1957         if (i < 0)
1958             goto err;
1959         if (i < count)
1960             continue;
1961         __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1962         if (!__devs)
1963             goto err;
1964         memcpy(__devs, devs, sizeof(dev) * count);
1965         kfree(devs);
1966         devs = __devs;
1967 
1968         dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
1969         if (IS_ERR(dev)) {
1970             switch (PTR_ERR(dev)) {
1971             case -EAGAIN:
1972                 /* skip invalid labels */
1973                 continue;
1974             case -ENODEV:
1975                 /* fallthrough to seed creation */
1976                 break;
1977             default:
1978                 goto err;
1979             }
1980         } else
1981             devs[count++] = dev;
1982 
1983     }
1984 
1985     dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
1986         count == 1 ? "" : "s");
1987 
1988     if (count == 0) {
1989         struct nd_namespace_pmem *nspm;
1990 
1991         /* Publish a zero-sized namespace for userspace to configure. */
1992         nd_mapping_free_labels(nd_mapping);
1993 
1994         devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1995         if (!devs)
1996             goto err;
1997 
1998         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1999         if (!nspm)
2000             goto err;
2001         dev = &nspm->nsio.common.dev;
2002         dev->type = &namespace_pmem_device_type;
2003         nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2004         dev->parent = &nd_region->dev;
2005         devs[count++] = dev;
2006     } else if (is_memory(&nd_region->dev)) {
2007         /* clean unselected labels */
2008         for (i = 0; i < nd_region->ndr_mappings; i++) {
2009             struct list_head *l, *e;
2010             LIST_HEAD(list);
2011             int j;
2012 
2013             nd_mapping = &nd_region->mapping[i];
2014             if (list_empty(&nd_mapping->labels)) {
2015                 WARN_ON(1);
2016                 continue;
2017             }
2018 
2019             j = count;
2020             list_for_each_safe(l, e, &nd_mapping->labels) {
2021                 if (!j--)
2022                     break;
2023                 list_move_tail(l, &list);
2024             }
2025             nd_mapping_free_labels(nd_mapping);
2026             list_splice_init(&list, &nd_mapping->labels);
2027         }
2028     }
2029 
2030     if (count > 1)
2031         sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2032 
2033     return devs;
2034 
2035  err:
2036     if (devs) {
2037         for (i = 0; devs[i]; i++)
2038             namespace_pmem_release(devs[i]);
2039         kfree(devs);
2040     }
2041     return NULL;
2042 }
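
/*
 * scan_labels() returns a NULL-terminated array of namespace devices:
 * on success at least one entry is present (a zero-sized seed namespace
 * is published when no labels validate), while NULL indicates an
 * allocation failure or a uuid conflict.  The caller owns the array and
 * kfree()s it after registering the devices.
 */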
2043 
2044 static struct device **create_namespaces(struct nd_region *nd_region)
2045 {
2046     struct nd_mapping *nd_mapping;
2047     struct device **devs;
2048     int i;
2049 
2050     if (nd_region->ndr_mappings == 0)
2051         return NULL;
2052 
2053     /* lock down all mappings while we scan labels */
2054     for (i = 0; i < nd_region->ndr_mappings; i++) {
2055         nd_mapping = &nd_region->mapping[i];
2056         mutex_lock_nested(&nd_mapping->lock, i);
2057     }
2058 
2059     devs = scan_labels(nd_region);
2060 
2061     for (i = 0; i < nd_region->ndr_mappings; i++) {
2062         int reverse = nd_region->ndr_mappings - 1 - i;
2063 
2064         nd_mapping = &nd_region->mapping[reverse];
2065         mutex_unlock(&nd_mapping->lock);
2066     }
2067 
2068     return devs;
2069 }
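
/*
 * All mapping locks are held across scan_labels() so the label lists
 * cannot change mid-scan.  mutex_lock_nested() annotates each
 * acquisition with its mapping index as the lockdep subclass; without
 * it, taking several locks of the same lock class in a row would be
 * flagged as recursive locking.  The locks are released in the reverse
 * of the order they were taken.
 */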
2070 
2071 static void deactivate_labels(void *region)
2072 {
2073     struct nd_region *nd_region = region;
2074     int i;
2075 
2076     for (i = 0; i < nd_region->ndr_mappings; i++) {
2077         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2078         struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2079         struct nvdimm *nvdimm = nd_mapping->nvdimm;
2080 
2081         mutex_lock(&nd_mapping->lock);
2082         nd_mapping_free_labels(nd_mapping);
2083         mutex_unlock(&nd_mapping->lock);
2084 
2085         put_ndd(ndd);
2086         nd_mapping->ndd = NULL;
2087         if (ndd)
2088             atomic_dec(&nvdimm->busy);
2089     }
2090 }
2091 
2092 static int init_active_labels(struct nd_region *nd_region)
2093 {
2094     int i, rc = 0;
2095 
2096     for (i = 0; i < nd_region->ndr_mappings; i++) {
2097         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2098         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2099         struct nvdimm *nvdimm = nd_mapping->nvdimm;
2100         struct nd_label_ent *label_ent;
2101         int count, j;
2102 
2103         /*
2104          * If the dimm is disabled then we may need to prevent
2105          * the region from being activated.
2106          */
2107         if (!ndd) {
2108             if (test_bit(NDD_LOCKED, &nvdimm->flags))
2109                 /* fail, label data may be unreadable */;
2110             else if (test_bit(NDD_LABELING, &nvdimm->flags))
2111                 /* fail, labels needed to disambiguate dpa */;
2112             else
2113                 continue;
2114 
2115             dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2116                     dev_name(&nd_mapping->nvdimm->dev),
2117                     test_bit(NDD_LOCKED, &nvdimm->flags)
2118                     ? "locked" : "disabled");
2119             rc = -ENXIO;
2120             goto out;
2121         }
2122         nd_mapping->ndd = ndd;
2123         atomic_inc(&nvdimm->busy);
2124         get_ndd(ndd);
2125 
2126         count = nd_label_active_count(ndd);
2127         dev_dbg(ndd->dev, "count: %d\n", count);
2128         if (!count)
2129             continue;
2130         for (j = 0; j < count; j++) {
2131             struct nd_namespace_label *label;
2132 
2133             label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2134             if (!label_ent)
2135                 break;
2136             label = nd_label_active(ndd, j);
2137             label_ent->label = label;
2138 
2139             mutex_lock(&nd_mapping->lock);
2140             list_add_tail(&label_ent->list, &nd_mapping->labels);
2141             mutex_unlock(&nd_mapping->lock);
2142         }
2143 
2144         if (j < count)
2145             break;
2146     }
2147 
2148     if (i < nd_region->ndr_mappings)
2149         rc = -ENOMEM;
2150 
2151 out:
2152     if (rc) {
2153         deactivate_labels(nd_region);
2154         return rc;
2155     }
2156 
2157     return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2158                     nd_region);
2159 }
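
/*
 * init_active_labels() pins each backing dimm (get_ndd() plus the
 * nvdimm 'busy' count) while the region holds references to its label
 * data; deactivate_labels() is registered as a devm action so those
 * references are dropped automatically when the region driver unbinds,
 * as well as explicitly on the error path above.
 */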
2160 
2161 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2162 {
2163     struct device **devs = NULL;
2164     int i, rc = 0, type;
2165 
2166     *err = 0;
2167     nvdimm_bus_lock(&nd_region->dev);
2168     rc = init_active_labels(nd_region);
2169     if (rc) {
2170         nvdimm_bus_unlock(&nd_region->dev);
2171         return rc;
2172     }
2173 
2174     type = nd_region_to_nstype(nd_region);
2175     switch (type) {
2176     case ND_DEVICE_NAMESPACE_IO:
2177         devs = create_namespace_io(nd_region);
2178         break;
2179     case ND_DEVICE_NAMESPACE_PMEM:
2180         devs = create_namespaces(nd_region);
2181         break;
2182     default:
2183         break;
2184     }
2185     nvdimm_bus_unlock(&nd_region->dev);
2186 
2187     if (!devs)
2188         return -ENODEV;
2189 
2190     for (i = 0; devs[i]; i++) {
2191         struct device *dev = devs[i];
2192         int id;
2193 
2194         if (type == ND_DEVICE_NAMESPACE_PMEM) {
2195             struct nd_namespace_pmem *nspm;
2196 
2197             nspm = to_nd_namespace_pmem(dev);
2198             id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2199                         GFP_KERNEL);
2200             nspm->id = id;
2201         } else
2202             id = i;
2203 
2204         if (id < 0)
2205             break;
2206         dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2207         device_initialize(dev);
2208         lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
2209         nd_device_register(dev);
2210     }
2211     if (i)
2212         nd_region->ns_seed = devs[0];
2213 
2214     if (devs[i]) {
2215         int j;
2216 
2217         for (j = i; devs[j]; j++) {
2218             struct device *dev = devs[j];
2219 
2220             device_initialize(dev);
2221             put_device(dev);
2222         }
2223         *err = j - i;
2224         /*
2225          * All of the namespaces we tried to register failed, so
2226          * fail region activation.
2227          */
2228         if (i == 0)
2229             rc = -ENODEV;
2230     }
2231     kfree(devs);
2232 
2233     if (rc == -ENODEV)
2234         return rc;
2235 
2236     return i;
2237 }
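
/*
 * Rough usage sketch (see nd_region_probe() in region.c for the real
 * caller):
 *
 *	rc = nd_region_register_namespaces(nd_region, &err);
 *	if (rc < 0)
 *		return rc;
 *	if (err)
 *		dev_err(&nd_region->dev,
 *			"failed to register %d namespaces\n", err);
 *
 * i.e. the return value is the number of namespaces that registered,
 * while *err counts the ones that could not be; a negative return
 * means the region itself failed to activate.
 */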