0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
0004  */
0005 #ifndef __ND_H__
0006 #define __ND_H__
0007 #include <linux/libnvdimm.h>
0008 #include <linux/badblocks.h>
0009 #include <linux/blkdev.h>
0010 #include <linux/device.h>
0011 #include <linux/mutex.h>
0012 #include <linux/ndctl.h>
0013 #include <linux/types.h>
0014 #include <linux/nd.h>
0015 #include "label.h"
0016 
/* Subsystem-wide tunables and flag values */
enum {
    /*
     * Limits the maximum number of block apertures a dimm can
     * support and is an input to the geometry/on-disk-format of a
     * BTT instance
     */
    ND_MAX_LANES = 256,
    /* alignment applied to internal lbasize values -- NOTE(review): confirm users */
    INT_LBASIZE_ALIGNMENT = 64,
    /* flag requesting atomic i/o semantics -- presumably passed to btt/claim paths */
    NVDIMM_IO_ATOMIC = 1,
};
0027 
/*
 * Per-dimm driver-private state: cached label-area contents plus
 * dimm-physical-address (DPA) allocation accounting.
 */
struct nvdimm_drvdata {
    struct device *dev;
    int nslabel_size;   /* cached namespace label size -- see sizeof_namespace_label() */
    struct nd_cmd_get_config_size nsarea;
    void *data;         /* local copy of the label config area; base for to_namespace_index() */
    bool cxl;           /* true: CXL-format labels, false: EFI-format labels */
    int ns_current, ns_next;    /* indices of current / next namespace-index blocks */
    struct resource dpa;        /* root of the DPA allocation tree (see for_each_dpa_resource) */
    struct kref kref;           /* released via nvdimm_drvdata_release() */
};
0038 
0039 static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
0040                      struct nd_namespace_label *nd_label)
0041 {
0042     if (ndd->cxl)
0043         return nd_label->cxl.name;
0044     return nd_label->efi.name;
0045 }
0046 
0047 static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
0048                    struct nd_namespace_label *nd_label, u8 *name)
0049 {
0050     if (ndd->cxl)
0051         return memcpy(name, nd_label->cxl.name, NSLABEL_NAME_LEN);
0052     return memcpy(name, nd_label->efi.name, NSLABEL_NAME_LEN);
0053 }
0054 
0055 static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
0056                    struct nd_namespace_label *nd_label, u8 *name)
0057 {
0058     if (!name)
0059         return NULL;
0060     if (ndd->cxl)
0061         return memcpy(nd_label->cxl.name, name, NSLABEL_NAME_LEN);
0062     return memcpy(nd_label->efi.name, name, NSLABEL_NAME_LEN);
0063 }
0064 
0065 static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
0066                    struct nd_namespace_label *nd_label)
0067 {
0068     if (ndd->cxl)
0069         return __le32_to_cpu(nd_label->cxl.slot);
0070     return __le32_to_cpu(nd_label->efi.slot);
0071 }
0072 
0073 static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
0074                 struct nd_namespace_label *nd_label, u32 slot)
0075 {
0076     if (ndd->cxl)
0077         nd_label->cxl.slot = __cpu_to_le32(slot);
0078     else
0079         nd_label->efi.slot = __cpu_to_le32(slot);
0080 }
0081 
0082 static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
0083                    struct nd_namespace_label *nd_label)
0084 {
0085     if (ndd->cxl)
0086         return __le64_to_cpu(nd_label->cxl.checksum);
0087     return __le64_to_cpu(nd_label->efi.checksum);
0088 }
0089 
0090 static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
0091                     struct nd_namespace_label *nd_label,
0092                     u64 checksum)
0093 {
0094     if (ndd->cxl)
0095         nd_label->cxl.checksum = __cpu_to_le64(checksum);
0096     else
0097         nd_label->efi.checksum = __cpu_to_le64(checksum);
0098 }
0099 
0100 static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
0101                 struct nd_namespace_label *nd_label)
0102 {
0103     if (ndd->cxl)
0104         return __le32_to_cpu(nd_label->cxl.flags);
0105     return __le32_to_cpu(nd_label->efi.flags);
0106 }
0107 
0108 static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
0109                  struct nd_namespace_label *nd_label, u32 flags)
0110 {
0111     if (ndd->cxl)
0112         nd_label->cxl.flags = __cpu_to_le32(flags);
0113     else
0114         nd_label->efi.flags = __cpu_to_le32(flags);
0115 }
0116 
0117 static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
0118                   struct nd_namespace_label *nd_label)
0119 {
0120     if (ndd->cxl)
0121         return __le64_to_cpu(nd_label->cxl.dpa);
0122     return __le64_to_cpu(nd_label->efi.dpa);
0123 }
0124 
0125 static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
0126                    struct nd_namespace_label *nd_label, u64 dpa)
0127 {
0128     if (ndd->cxl)
0129         nd_label->cxl.dpa = __cpu_to_le64(dpa);
0130     else
0131         nd_label->efi.dpa = __cpu_to_le64(dpa);
0132 }
0133 
0134 static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
0135                   struct nd_namespace_label *nd_label)
0136 {
0137     if (ndd->cxl)
0138         return __le64_to_cpu(nd_label->cxl.rawsize);
0139     return __le64_to_cpu(nd_label->efi.rawsize);
0140 }
0141 
0142 static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
0143                    struct nd_namespace_label *nd_label,
0144                    u64 rawsize)
0145 {
0146     if (ndd->cxl)
0147         nd_label->cxl.rawsize = __cpu_to_le64(rawsize);
0148     else
0149         nd_label->efi.rawsize = __cpu_to_le64(rawsize);
0150 }
0151 
/*
 * Read the EFI label's interleave-set cookie.  CXL labels have no such
 * field; calling this for a CXL dimm is a bug that triggers a one-time
 * WARN and returns 0.
 */
static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
                     struct nd_namespace_label *nd_label)
{
    /* WARN future refactor attempts that break this assumption */
    if (dev_WARN_ONCE(ndd->dev, ndd->cxl,
              "CXL labels do not use the isetcookie concept\n"))
        return 0;
    return __le64_to_cpu(nd_label->efi.isetcookie);
}
0161 
0162 static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
0163                       struct nd_namespace_label *nd_label,
0164                       u64 isetcookie)
0165 {
0166     if (!ndd->cxl)
0167         nd_label->efi.isetcookie = __cpu_to_le64(isetcookie);
0168 }
0169 
0170 static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
0171                        struct nd_namespace_label *nd_label,
0172                        u64 cookie)
0173 {
0174     /*
0175      * Let the EFI and CXL validation comingle, where fields that
0176      * don't matter to CXL always validate.
0177      */
0178     if (ndd->cxl)
0179         return true;
0180     return cookie == __le64_to_cpu(nd_label->efi.isetcookie);
0181 }
0182 
0183 static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
0184                    struct nd_namespace_label *nd_label)
0185 {
0186     if (ndd->cxl)
0187         return __le16_to_cpu(nd_label->cxl.position);
0188     return __le16_to_cpu(nd_label->efi.position);
0189 }
0190 
0191 static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
0192                     struct nd_namespace_label *nd_label,
0193                     u16 position)
0194 {
0195     if (ndd->cxl)
0196         nd_label->cxl.position = __cpu_to_le16(position);
0197     else
0198         nd_label->efi.position = __cpu_to_le16(position);
0199 }
0200 
0201 static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
0202                  struct nd_namespace_label *nd_label)
0203 {
0204     if (ndd->cxl)
0205         return 0;
0206     return __le16_to_cpu(nd_label->efi.nlabel);
0207 }
0208 
0209 static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
0210                   struct nd_namespace_label *nd_label,
0211                   u16 nlabel)
0212 {
0213     if (!ndd->cxl)
0214         nd_label->efi.nlabel = __cpu_to_le16(nlabel);
0215 }
0216 
0217 static inline u16 nsl_get_nrange(struct nvdimm_drvdata *ndd,
0218                  struct nd_namespace_label *nd_label)
0219 {
0220     if (ndd->cxl)
0221         return __le16_to_cpu(nd_label->cxl.nrange);
0222     return 1;
0223 }
0224 
0225 static inline void nsl_set_nrange(struct nvdimm_drvdata *ndd,
0226                   struct nd_namespace_label *nd_label,
0227                   u16 nrange)
0228 {
0229     if (ndd->cxl)
0230         nd_label->cxl.nrange = __cpu_to_le16(nrange);
0231 }
0232 
0233 static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
0234                   struct nd_namespace_label *nd_label)
0235 {
0236     /*
0237      * Yes, for some reason the EFI labels convey a massive 64-bit
0238      * lbasize, that got fixed for CXL.
0239      */
0240     if (ndd->cxl)
0241         return __le16_to_cpu(nd_label->cxl.lbasize);
0242     return __le64_to_cpu(nd_label->efi.lbasize);
0243 }
0244 
0245 static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
0246                    struct nd_namespace_label *nd_label,
0247                    u64 lbasize)
0248 {
0249     if (ndd->cxl)
0250         nd_label->cxl.lbasize = __cpu_to_le16(lbasize);
0251     else
0252         nd_label->efi.lbasize = __cpu_to_le64(lbasize);
0253 }
0254 
0255 static inline const uuid_t *nsl_get_uuid(struct nvdimm_drvdata *ndd,
0256                      struct nd_namespace_label *nd_label,
0257                      uuid_t *uuid)
0258 {
0259     if (ndd->cxl)
0260         import_uuid(uuid, nd_label->cxl.uuid);
0261     else
0262         import_uuid(uuid, nd_label->efi.uuid);
0263     return uuid;
0264 }
0265 
0266 static inline const uuid_t *nsl_set_uuid(struct nvdimm_drvdata *ndd,
0267                      struct nd_namespace_label *nd_label,
0268                      const uuid_t *uuid)
0269 {
0270     if (ndd->cxl)
0271         export_uuid(nd_label->cxl.uuid, uuid);
0272     else
0273         export_uuid(nd_label->efi.uuid, uuid);
0274     return uuid;
0275 }
0276 
0277 static inline bool nsl_uuid_equal(struct nvdimm_drvdata *ndd,
0278                   struct nd_namespace_label *nd_label,
0279                   const uuid_t *uuid)
0280 {
0281     uuid_t tmp;
0282 
0283     if (ndd->cxl)
0284         import_uuid(&tmp, nd_label->cxl.uuid);
0285     else
0286         import_uuid(&tmp, nd_label->efi.uuid);
0287     return uuid_equal(&tmp, uuid);
0288 }
0289 
0290 static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
0291                      struct nd_namespace_label *nd_label)
0292 {
0293     if (ndd->cxl)
0294         return nd_label->cxl.uuid;
0295     return nd_label->efi.uuid;
0296 }
0297 
0298 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
0299                 struct nd_namespace_label *nd_label, guid_t *guid);
0300 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
0301                         struct nd_namespace_label *nd_label);
0302 
/* Per-region driver-private data: namespace counts and flush-hint table */
struct nd_region_data {
    int ns_count;
    int ns_active;
    unsigned int hints_shift;   /* log2 of flush-hint slots per dimm */
    /* flattened [dimm][hint] table; see ndrd_{get,set}_flush_wpq() */
    void __iomem *flush_wpq[];
};
0309 
0310 static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
0311         int dimm, int hint)
0312 {
0313     unsigned int num = 1 << ndrd->hints_shift;
0314     unsigned int mask = num - 1;
0315 
0316     return ndrd->flush_wpq[dimm * num + (hint & mask)];
0317 }
0318 
0319 static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
0320         int hint, void __iomem *flush)
0321 {
0322     unsigned int num = 1 << ndrd->hints_shift;
0323     unsigned int mask = num - 1;
0324 
0325     ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
0326 }
0327 
0328 static inline struct nd_namespace_index *to_namespace_index(
0329         struct nvdimm_drvdata *ndd, int i)
0330 {
0331     if (i < 0)
0332         return NULL;
0333 
0334     return ndd->data + sizeof_namespace_index(ndd) * i;
0335 }
0336 
/* Return the currently active namespace-index block (ndd->ns_current). */
static inline struct nd_namespace_index *to_current_namespace_index(
        struct nvdimm_drvdata *ndd)
{
    return to_namespace_index(ndd, ndd->ns_current);
}
0342 
/* Return the next-to-be-written namespace-index block (ndd->ns_next). */
static inline struct nd_namespace_index *to_next_namespace_index(
        struct nvdimm_drvdata *ndd)
{
    return to_namespace_index(ndd, ndd->ns_next);
}
0348 
0349 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
0350 
/*
 * True when this dimm uses EFI-format labels AND the on-media label is
 * large enough to contain @field.  @ndd is parenthesized so any
 * expression may be passed as the argument.
 */
#define efi_namespace_label_has(ndd, field) \
    (!(ndd)->cxl && offsetof(struct nvdimm_efi_label, field) \
        < sizeof_namespace_label(ndd))
0354 
/*
 * Debug-print a DPA resource in the context of region @r (or dimm @d
 * when @r is NULL).  All macro arguments are parenthesized so arbitrary
 * expressions may be passed.
 */
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
    dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
        (r) ? dev_name((d)->dev) : "", (res) ? (res)->name : "null", \
        (unsigned long long) ((res) ? resource_size(res) : 0), \
        (unsigned long long) ((res) ? (res)->start : 0), ##arg)
0360 
/* Iterate every DPA allocation (children of ndd->dpa) on a dimm */
#define for_each_dpa_resource(ndd, res) \
    for (res = (ndd)->dpa.child; res; res = res->sibling)
0363 
/* As for_each_dpa_resource(), but safe against freeing @res mid-walk */
#define for_each_dpa_resource_safe(ndd, res, next) \
    for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
            res; res = next, next = next ? next->sibling : NULL)
0367 
/* Per-cpu lane state; see ND_MAX_LANES and nd_region->lane */
struct nd_percpu_lane {
    int count;          /* NOTE(review): presumably a nesting count -- confirm in lane acquire/release */
    spinlock_t lock;
};
0372 
/* Bit positions for nd_label_ent.flags */
enum nd_label_flags {
    ND_LABEL_REAP,  /* label marked for reclaim -- confirm in namespace scan code */
};
/* Tracks one namespace label on an nd_mapping's @labels list */
struct nd_label_ent {
    struct list_head list;
    unsigned long flags;    /* bitmask of enum nd_label_flags */
    struct nd_namespace_label *label;
};
0381 
/*
 * Lock classes for nd_mapping.lock -- presumably to permit nested
 * acquisition during uuid scans without lockdep false positives.
 */
enum nd_mapping_lock_class {
    ND_MAPPING_CLASS0,
    ND_MAPPING_UUID_SCAN,
};
0386 
/* One dimm's contribution (a DPA span) to an interleaved region */
struct nd_mapping {
    struct nvdimm *nvdimm;
    u64 start;      /* starting dimm-physical-address of this mapping */
    u64 size;
    int position;   /* interleave position -- compare nsl_get_position() */
    struct list_head labels;    /* list of struct nd_label_ent */
    struct mutex lock;          /* NOTE(review): presumably protects @labels -- confirm */
    /*
     * @ndd is for private use at region enable / disable time for
     * get_ndd() + put_ndd(), all other nd_mapping to ndd
     * conversions use to_ndd() which respects enabled state of the
     * nvdimm.
     */
    struct nvdimm_drvdata *ndd;
};
0402 
/*
 * A region device: an interleave set of @ndr_mappings dimm mappings
 * that hosts namespace / btt / pfn / dax child devices.
 */
struct nd_region {
    struct device dev;
    struct ida ns_ida;      /* id allocators for child device names */
    struct ida btt_ida;
    struct ida pfn_ida;
    struct ida dax_ida;
    unsigned long flags;
    /* "seed" child devices -- presumably pre-created candidates; confirm in region core */
    struct device *ns_seed;
    struct device *btt_seed;
    struct device *pfn_seed;
    struct device *dax_seed;
    unsigned long align;
    u16 ndr_mappings;   /* number of entries in mapping[]; compared in nsl_validate_nlabel() */
    u64 ndr_size;
    u64 ndr_start;
    int id, num_lanes, ro, numa_node, target_node;
    void *provider_data;
    struct kernfs_node *bb_state;
    struct badblocks bb;
    struct nd_interleave_set *nd_set;
    struct nd_percpu_lane __percpu *lane;
    /* optional region flush hook; NOTE(review): confirm NULL means unsupported */
    int (*flush)(struct nd_region *nd_region, struct bio *bio);
    struct nd_mapping mapping[];    /* flexible array of ndr_mappings entries */
};
0427 
0428 static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
0429                        struct nvdimm_drvdata *ndd,
0430                        struct nd_namespace_label *nd_label)
0431 {
0432     if (ndd->cxl)
0433         return true;
0434     return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
0435 }
0436 
/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
    /* 0 is not part of the cycle and maps to 0; otherwise 1 -> 2 -> 3 -> 1 */
    switch (seq & 3) {
    case 1:
        return 2;
    case 2:
        return 3;
    case 3:
        return 1;
    default:
        return 0;
    }
}
0446 
struct btt;
/* BTT (Block Translation Table) frontend device state */
struct nd_btt {
    struct device dev;
    struct nd_namespace_common *ndns;   /* backing namespace */
    struct btt *btt;                    /* opaque driver instance -- presumably set at probe */
    unsigned long lbasize;
    u64 size;
    uuid_t *uuid;
    int id;
    int initial_offset;
    u16 version_major;
    u16 version_minor;
};
0460 
/* Placement mode for a pfn device's page metadata -- confirm against pfn core */
enum nd_pfn_mode {
    PFN_MODE_NONE,
    PFN_MODE_RAM,
    PFN_MODE_PMEM,
};
0466 
/* PFN (page-frame-number) frontend device state */
struct nd_pfn {
    int id;
    uuid_t *uuid;
    struct device dev;
    unsigned long align;
    unsigned long npfns;
    enum nd_pfn_mode mode;
    struct nd_pfn_sb *pfn_sb;           /* on-media superblock; see nd_pfn_validate() */
    struct nd_namespace_common *ndns;   /* backing namespace */
};
0477 
/* A dax device is a thin wrapper around a pfn device */
struct nd_dax {
    struct nd_pfn nd_pfn;
};
0481 
/* Space reserved for namespace info blocks: 8K, rounded up to a full page. */
static inline u32 nd_info_block_reserve(void)
{
    return ALIGN(SZ_8K, PAGE_SIZE);
}
0486 
/* Whether nd_device_unregister() completes synchronously or asynchronously */
enum nd_async_mode {
    ND_SYNC,
    ND_ASYNC,
};
0491 
0492 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
0493 void wait_nvdimm_bus_probe_idle(struct device *dev);
0494 void nd_device_register(struct device *dev);
0495 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
0496 void nd_device_notify(struct device *dev, enum nvdimm_event event);
0497 int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
0498         size_t len);
0499 ssize_t nd_size_select_show(unsigned long current_size,
0500         const unsigned long *supported, char *buf);
0501 ssize_t nd_size_select_store(struct device *dev, const char *buf,
0502         unsigned long *current_size, const unsigned long *supported);
0503 int __init nvdimm_init(void);
0504 int __init nd_region_init(void);
0505 int __init nd_label_init(void);
0506 void nvdimm_exit(void);
0507 void nd_region_exit(void);
0508 struct nvdimm;
0509 extern const struct attribute_group nd_device_attribute_group;
0510 extern const struct attribute_group nd_numa_attribute_group;
0511 extern const struct attribute_group *nvdimm_bus_attribute_groups[];
0512 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
0513 int nvdimm_check_config_data(struct device *dev);
0514 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
0515 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
0516 int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
0517                size_t offset, size_t len);
0518 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
0519         void *buf, size_t len);
0520 long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
0521         unsigned int len);
0522 void nvdimm_set_labeling(struct device *dev);
0523 void nvdimm_set_locked(struct device *dev);
0524 void nvdimm_clear_locked(struct device *dev);
0525 int nvdimm_security_setup_events(struct device *dev);
/* Security unlock support requires CONFIG_NVDIMM_KEYS; otherwise a no-op */
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
    return 0;
}
#endif
0534 struct nd_btt *to_nd_btt(struct device *dev);
0535 
/*
 * Generic 4K info-block layout: the checksum occupies the final 8
 * bytes.  Consumed by nd_sb_checksum() below.
 */
struct nd_gen_sb {
    char reserved[SZ_4K - 8];
    __le64 checksum;
};
0540 
0541 u64 nd_sb_checksum(struct nd_gen_sb *sb);
/* BTT support: real implementations with CONFIG_BTT, else inert stubs */
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
/* Stubs: probe fails, type check is false, creation yields no device */
static inline int nd_btt_probe(struct device *dev,
        struct nd_namespace_common *ndns)
{
    return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
    return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
    return NULL;
}
#endif
0563 
0564 struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

/* NOTE(review): units unclear from here -- confirm how MAX_NVDIMM_ALIGN is applied */
#define MAX_NVDIMM_ALIGN    4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
        struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
/* Stubs for !CONFIG_NVDIMM_PFN: probe/validate fail, type check is false */
static inline int nd_pfn_probe(struct device *dev,
        struct nd_namespace_common *ndns)
{
    return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
    return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
    return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
    return -ENODEV;
}
#endif
0598 
struct nd_dax *to_nd_dax(struct device *dev);
/* DAX support: real implementations with CONFIG_NVDIMM_DAX, else inert stubs */
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
        struct nd_namespace_common *ndns)
{
    return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
    return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
    return NULL;
}
#endif
0621 
0622 int nd_region_to_nstype(struct nd_region *nd_region);
0623 int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
0624 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
0625         struct nd_namespace_index *nsindex);
0626 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
0627 void nvdimm_bus_lock(struct device *dev);
0628 void nvdimm_bus_unlock(struct device *dev);
0629 bool is_nvdimm_bus_locked(struct device *dev);
0630 void nvdimm_check_and_set_ro(struct gendisk *disk);
0631 void nvdimm_drvdata_release(struct kref *kref);
0632 void put_ndd(struct nvdimm_drvdata *ndd);
0633 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
0634 void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
0635 struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
0636         struct nd_label_id *label_id, resource_size_t start,
0637         resource_size_t n);
0638 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
0639 bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
0640 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
0641 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
0642 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
0643 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
0644         char *name);
0645 unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
0646 struct range;
0647 void nvdimm_badblocks_populate(struct nd_region *nd_region,
0648         struct badblocks *bb, const struct range *range);
0649 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
0650         resource_size_t size);
0651 void devm_namespace_disable(struct device *dev,
0652         struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
/* Without CONFIG_ND_CLAIM there is no pfn setup support */
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
                   struct dev_pagemap *pgmap)
{
    return -ENXIO;
}
#endif
0664 int nd_region_activate(struct nd_region *nd_region);
0665 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
0666         unsigned int len)
0667 {
0668     if (bb->count) {
0669         sector_t first_bad;
0670         int num_bad;
0671 
0672         return !!badblocks_check(bb, sector, len / 512, &first_bad,
0673                 &num_bad);
0674     }
0675 
0676     return false;
0677 }
0678 const uuid_t *nd_dev_to_uuid(struct device *dev);
0679 bool pmem_should_map_pages(struct device *dev);
0680 #endif /* __ND_H__ */