0001
0002
0003
0004
0005 #include <linux/device.h>
0006 #include <linux/ndctl.h>
0007 #include <linux/uuid.h>
0008 #include <linux/slab.h>
0009 #include <linux/io.h>
0010 #include <linux/nd.h>
0011 #include "nd-core.h"
0012 #include "label.h"
0013 #include "nd.h"
0014
/* Claim-class identifiers, parsed from string constants in nd_label_init().
 * EFI-format labels record the abstraction as a GUID ... */
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

/* ...while CXL-format labels record the same identifiers as UUIDs. */
static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

/* NOTE(review): parsed in nd_label_init() but not referenced elsewhere in
 * this chunk — presumably consumed by CXL label handling outside this view.
 */
static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;
0027
/* On-media signature of a namespace index block; only the first
 * NSINDEX_SIG_LEN bytes are compared/copied (see __nd_label_validate()
 * and nd_label_write_index()).
 */
static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
0029
0030 static u32 best_seq(u32 a, u32 b)
0031 {
0032 a &= NSINDEX_SEQ_MASK;
0033 b &= NSINDEX_SEQ_MASK;
0034
0035 if (a == 0 || a == b)
0036 return b;
0037 else if (b == 0)
0038 return a;
0039 else if (nd_inc_seq(a) == b)
0040 return b;
0041 else
0042 return a;
0043 }
0044
/* Return the label size (in bytes) previously probed/recorded for @ndd. */
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
0049
0050 static size_t __sizeof_namespace_index(u32 nslot)
0051 {
0052 return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
0053 NSINDEX_ALIGN);
0054 }
0055
0056 static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
0057 size_t index_size)
0058 {
0059 return (ndd->nsarea.config_size - index_size * 2) /
0060 sizeof_namespace_label(ndd);
0061 }
0062
/*
 * nvdimm_num_label_slots - number of label slots the config area supports.
 *
 * First compute an optimistic slot count (as if there were no index
 * overhead), size the index block for that count, then recompute the
 * slot count with the two index copies subtracted.
 */
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	/* n = index size in NSINDEX_ALIGN units for the optimistic count */
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
0072
/*
 * sizeof_namespace_index - size of ONE namespace index block copy.
 *
 * Sizes the index for the supported slot count and verifies that two
 * index copies plus all the labels fit within the config area (the
 * label area must host at least 2 index blocks and 2 labels).  Returns
 * 0 when the label area is too small.
 */
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	nslot = nvdimm_num_label_slots(ndd);
	/* bytes left over for the two index copies */
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
		ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
0093
/*
 * __nd_label_validate - validate both on-media index blocks.
 *
 * The label area starts with two namespace index blocks followed by an
 * array of labels.  Each index block is checked for: signature, a label
 * size matching the currently assumed ndd->nslabel_size, fletcher64
 * checksum, a non-zero sequence number, and self-consistent
 * myoff/otheroff/labeloff/mysize/nslot values.
 *
 * Returns the index (0 or 1) of the block with the best sequence
 * number, or -1 when neither block validates.
 */
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
				i, nsindex[i]->labelsize);
			continue;
		}

		/* checksum is computed with the checksum field zeroed,
		 * then the on-media value is restored */
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		/* a zero sequence number marks the block invalid */
		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index offsets against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
				i, (unsigned long long)
				__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
				i, (unsigned long long)
				__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
				i, (unsigned long long)
				__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		/* the claimed slot count must fit in the config area */
		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
				i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't happen: num_valid == 1 implies one valid[] set */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
			       __le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
0239
/*
 * nd_label_validate - probe the label size and validate the indexes.
 *
 * The index-block checksum depends on the label size, but the label
 * size is itself recorded inside the index block.  So probe: try each
 * supported label size in turn until one produces a valid index.
 *
 * Returns the valid index (0 or 1) or -1 when validation fails for
 * every candidate size.  Side effect: leaves ndd->nslabel_size set to
 * the last size tried.
 */
static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}
0262
/* Copy one index block over another; a missing block on either side
 * makes the copy a no-op.
 */
static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	if (dst && src)
		memcpy(dst, src, sizeof_namespace_index(ndd));
}
0273
/* The label array starts immediately after the two index blocks. */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	char *area = (char *) to_namespace_index(ndd, 0);

	return (struct nd_namespace_label *)
		(area + 2 * sizeof_namespace_index(ndd));
}
0280
/* Convert a label pointer back into its slot number within the array. */
static int to_slot(struct nvdimm_drvdata *ndd,
		   struct nd_namespace_label *nd_label)
{
	unsigned long offset = (unsigned long) nd_label
		- (unsigned long) nd_label_base(ndd);

	return offset / sizeof_namespace_label(ndd);
}
0291
/* Address of the label occupying @slot in the label array. */
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long addr = (unsigned long) nd_label_base(ndd)
		+ (unsigned long) sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) addr;
}
0301
/*
 * for_each_clear_bit_le - iterate over every clear bit in a
 * little-endian bitmap of @size bits.  In this file a clear bit in an
 * index "free" bitmap marks an in-use (active) label slot.
 */
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
0316 struct nd_namespace_index **nsindex_out,
0317 unsigned long **free, u32 *nslot)
0318 {
0319 struct nd_namespace_index *nsindex;
0320
0321 nsindex = to_namespace_index(ndd, idx);
0322 if (nsindex == NULL)
0323 return false;
0324
0325 *free = (unsigned long *) nsindex->free;
0326 *nslot = __le32_to_cpu(nsindex->nslot);
0327 *nsindex_out = nsindex;
0328
0329 return true;
0330 }
0331
/*
 * nd_label_gen_id - format the dpa-allocation id ("pmem-<uuid>") for a
 * label.  Returns label_id->id, or NULL on missing arguments.
 *
 * NOTE(review): @flags is currently unused — every id gets the "pmem-"
 * prefix; presumably a leftover from when local/blk labels existed.
 */
char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		      u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
	return label_id->id;
}
0340
0341 static bool preamble_current(struct nvdimm_drvdata *ndd,
0342 struct nd_namespace_index **nsindex,
0343 unsigned long **free, u32 *nslot)
0344 {
0345 return preamble_index(ndd, ndd->ns_current, nsindex,
0346 free, nslot);
0347 }
0348
0349 static bool preamble_next(struct nvdimm_drvdata *ndd,
0350 struct nd_namespace_index **nsindex,
0351 unsigned long **free, u32 *nslot)
0352 {
0353 return preamble_index(ndd, ndd->ns_next, nsindex,
0354 free, nslot);
0355 }
0356
/*
 * nsl_validate_checksum - verify a label's fletcher64 checksum.
 *
 * Labels without a checksum field (non-CXL labels lacking the field
 * per efi_namespace_label_has()) trivially pass.  The checksum is
 * computed over the label with its checksum field zeroed; the on-media
 * value is restored afterwards.
 */
static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);	/* restore */
	return sum == sum_save;
}
0371
/*
 * nsl_calculate_checksum - compute and store a label's fletcher64
 * checksum (over the label with the checksum field zeroed).  No-op for
 * label formats without a checksum field.
 */
static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}
0383
0384 static bool slot_valid(struct nvdimm_drvdata *ndd,
0385 struct nd_namespace_label *nd_label, u32 slot)
0386 {
0387 bool valid;
0388
0389
0390 if (slot != nsl_get_slot(ndd, nd_label))
0391 return false;
0392 valid = nsl_validate_checksum(ndd, nd_label);
0393 if (!valid)
0394 dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
0395 return valid;
0396 }
0397
/*
 * nd_label_reserve_dpa - reserve the dpa range of every valid active
 * label so the allocator will not hand that capacity out again.
 *
 * Returns 0 on success (including "no valid index") or -EBUSY when a
 * reservation fails.
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label metadata */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
					  nsl_get_dpa(ndd, nd_label),
					  nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
0433
/*
 * nd_label_data_init - read and cache the dimm's label config area.
 *
 * Reads the index blocks first, validates them, then pulls in only the
 * active labels (free slots are zeroed locally instead of read).
 * Returns 0 on success (including "no valid index"), negative errno on
 * failure.
 */
int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the
	 * section we must read and validate before we can start
	 * processing labels.
	 *
	 * Start at a label size of 128 as this should result in the
	 * largest possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate the local config data cache */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}
0555
/*
 * nd_label_active_count - count the valid labels in the current index.
 * Slots that are marked in-use but fail validation are logged and
 * skipped; returns 0 when there is no valid index.
 */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
0585
0586 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
0587 {
0588 struct nd_namespace_index *nsindex;
0589 unsigned long *free;
0590 u32 nslot, slot;
0591
0592 if (!preamble_current(ndd, &nsindex, &free, &nslot))
0593 return NULL;
0594
0595 for_each_clear_bit_le(slot, free, nslot) {
0596 struct nd_namespace_label *nd_label;
0597
0598 nd_label = to_label(ndd, slot);
0599 if (!slot_valid(ndd, nd_label, slot))
0600 continue;
0601
0602 if (n-- == 0)
0603 return to_label(ndd, slot);
0604 }
0605
0606 return NULL;
0607 }
0608
0609 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
0610 {
0611 struct nd_namespace_index *nsindex;
0612 unsigned long *free;
0613 u32 nslot, slot;
0614
0615 if (!preamble_next(ndd, &nsindex, &free, &nslot))
0616 return UINT_MAX;
0617
0618 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
0619
0620 slot = find_next_bit_le(free, nslot, 0);
0621 if (slot == nslot)
0622 return UINT_MAX;
0623
0624 clear_bit_le(slot, free);
0625
0626 return slot;
0627 }
0628
0629 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
0630 {
0631 struct nd_namespace_index *nsindex;
0632 unsigned long *free;
0633 u32 nslot;
0634
0635 if (!preamble_next(ndd, &nsindex, &free, &nslot))
0636 return false;
0637
0638 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
0639
0640 if (slot < nslot)
0641 return !test_and_set_bit_le(slot, free);
0642 return false;
0643 }
0644
0645 u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
0646 {
0647 struct nd_namespace_index *nsindex;
0648 unsigned long *free;
0649 u32 nslot;
0650
0651 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
0652
0653 if (!preamble_next(ndd, &nsindex, &free, &nslot))
0654 return nvdimm_num_label_slots(ndd);
0655
0656 return bitmap_weight(free, nslot);
0657 }
0658
/*
 * nd_label_write_index - (re)generate index block @index with sequence
 * number @seq and write it to media.
 *
 * With ND_NSINDEX_INIT the slot count is recomputed and the free
 * bitmap is initialized to all-free; otherwise the existing slot count
 * and bitmap are preserved.  On a non-init write the committed index
 * becomes "current" and the roles of the two blocks swap.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	/* clear the 3-byte flags field */
	memset(&nsindex->flags, 0, 3);
	/* labelsize encodes the label size as a power-of-2 shift of 256 */
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	/* 128-byte labels are v1.1, larger labels are v1.2 */
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		/* mark all slots free, then clear the pad bits past nslot */
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	/* checksum is computed with the checksum field zeroed (above) */
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
0724
/* Byte offset of @nd_label from the start of the label config area. */
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	unsigned long area_base = (unsigned long) to_namespace_index(ndd, 0);

	return (unsigned long) nd_label - area_base;
}
0731
0732 static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
0733 {
0734 if (guid_equal(guid, &nvdimm_btt_guid))
0735 return NVDIMM_CCLASS_BTT;
0736 else if (guid_equal(guid, &nvdimm_btt2_guid))
0737 return NVDIMM_CCLASS_BTT2;
0738 else if (guid_equal(guid, &nvdimm_pfn_guid))
0739 return NVDIMM_CCLASS_PFN;
0740 else if (guid_equal(guid, &nvdimm_dax_guid))
0741 return NVDIMM_CCLASS_DAX;
0742 else if (guid_equal(guid, &guid_null))
0743 return NVDIMM_CCLASS_NONE;
0744
0745 return NVDIMM_CCLASS_UNKNOWN;
0746 }
0747
0748
0749 static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
0750 {
0751 if (uuid_equal(uuid, &nvdimm_btt_uuid))
0752 return NVDIMM_CCLASS_BTT;
0753 else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
0754 return NVDIMM_CCLASS_BTT2;
0755 else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
0756 return NVDIMM_CCLASS_PFN;
0757 else if (uuid_equal(uuid, &nvdimm_dax_uuid))
0758 return NVDIMM_CCLASS_DAX;
0759 else if (uuid_equal(uuid, &uuid_null))
0760 return NVDIMM_CCLASS_NONE;
0761
0762 return NVDIMM_CCLASS_UNKNOWN;
0763 }
0764
0765 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
0766 guid_t *target)
0767 {
0768 if (claim_class == NVDIMM_CCLASS_BTT)
0769 return &nvdimm_btt_guid;
0770 else if (claim_class == NVDIMM_CCLASS_BTT2)
0771 return &nvdimm_btt2_guid;
0772 else if (claim_class == NVDIMM_CCLASS_PFN)
0773 return &nvdimm_pfn_guid;
0774 else if (claim_class == NVDIMM_CCLASS_DAX)
0775 return &nvdimm_dax_guid;
0776 else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
0777
0778
0779
0780
0781 return target;
0782 } else
0783 return &guid_null;
0784 }
0785
0786
0787 static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
0788 uuid_t *target)
0789 {
0790 if (claim_class == NVDIMM_CCLASS_BTT)
0791 return &nvdimm_btt_uuid;
0792 else if (claim_class == NVDIMM_CCLASS_BTT2)
0793 return &nvdimm_btt2_uuid;
0794 else if (claim_class == NVDIMM_CCLASS_PFN)
0795 return &nvdimm_pfn_uuid;
0796 else if (claim_class == NVDIMM_CCLASS_DAX)
0797 return &nvdimm_dax_uuid;
0798 else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
0799
0800
0801
0802
0803 return target;
0804 } else
0805 return &uuid_null;
0806 }
0807
/*
 * reap_victim - release the slot held by a superseded label and drop
 * the tracking entry's reference to it (the entry itself is reused).
 */
static void reap_victim(struct nd_mapping *nd_mapping,
			struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}
0818
/* Record the region type_guid on labels whose format carries the field
 * (per efi_namespace_label_has()); no-op otherwise.
 */
static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}
0825
0826 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
0827 struct nd_namespace_label *nd_label, guid_t *guid)
0828 {
0829 if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
0830 return true;
0831 if (!guid_equal(&nd_label->efi.type_guid, guid)) {
0832 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
0833 &nd_label->efi.type_guid);
0834 return false;
0835 }
0836 return true;
0837 }
0838
/*
 * nsl_set_claim_class - record the claim class on a label.  CXL labels
 * store it as a UUID, EFI labels with the abstraction field as a GUID;
 * other label formats are left untouched.  The existing on-media value
 * is passed through for the UNKNOWN class (see to_abstraction_*()).
 */
static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label,
		enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}
0858
0859 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
0860 struct nd_namespace_label *nd_label)
0861 {
0862 if (ndd->cxl) {
0863 uuid_t uuid;
0864
0865 import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
0866 return uuid_to_nvdimm_cclass(&uuid);
0867 }
0868 if (!efi_namespace_label_has(ndd, abstraction_guid))
0869 return NVDIMM_CCLASS_NONE;
0870 return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
0871 }
0872
/*
 * __pmem_label_update - write one dimm's label for namespace @nspm.
 *
 * Allocates a free slot in the "next" index, populates and writes the
 * new label, garbage collects the label(s) it supersedes, then commits
 * the index (which atomically flips current/next).  Returns 0 on
 * success or a negative error code.
 */
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	/* find the dpa allocation backing this namespace on this dimm */
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write a label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		/* track the new label in a free tracking entry */
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
0966
0967 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
0968 {
0969 int i, old_num_labels = 0;
0970 struct nd_label_ent *label_ent;
0971 struct nd_namespace_index *nsindex;
0972 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
0973
0974 mutex_lock(&nd_mapping->lock);
0975 list_for_each_entry(label_ent, &nd_mapping->labels, list)
0976 old_num_labels++;
0977 mutex_unlock(&nd_mapping->lock);
0978
0979
0980
0981
0982
0983 for (i = old_num_labels; i < num_labels; i++) {
0984 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
0985 if (!label_ent)
0986 return -ENOMEM;
0987 mutex_lock(&nd_mapping->lock);
0988 list_add_tail(&label_ent->list, &nd_mapping->labels);
0989 mutex_unlock(&nd_mapping->lock);
0990 }
0991
0992 if (ndd->ns_current == -1 || ndd->ns_next == -1)
0993 ;
0994 else
0995 return max(num_labels, old_num_labels);
0996
0997 nsindex = to_namespace_index(ndd, 0);
0998 memset(nsindex, 0, ndd->nsarea.config_size);
0999 for (i = 0; i < 2; i++) {
1000 int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1001
1002 if (rc)
1003 return rc;
1004 }
1005 ndd->ns_next = 1;
1006 ndd->ns_current = 0;
1007
1008 return max(num_labels, old_num_labels);
1009 }
1010
/*
 * del_labels - free every active label slot on this mapping whose uuid
 * matches @uuid, then commit an updated index.  Returns 0 when there
 * is nothing to do, otherwise the index write result.
 */
static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index to update: nothing on media to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;	/* matched: being deleted, not kept */
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		/* park the emptied entry so it lands at the list tail */
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
1055
/*
 * nd_pmem_namespace_label_update - write updated labels for @nspm to
 * every mapping in @nd_region.  A @size of zero deletes the
 * namespace's labels instead.  Returns 0 on success or a negative
 * error code.
 */
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		/* size label tracking to this dimm's pmem allocations */
		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		/* first pass: write labels with the UPDATING flag set */
		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* second pass: all dimms written, rewrite with UPDATING cleared */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
1103
/*
 * nd_label_init - parse the well-known claim-class identifier strings
 * into binary form at init time.  The same constants are parsed twice
 * because EFI labels use guid_t while CXL labels use uuid_t.
 */
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}