// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

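/* selects whether btt_log_read() returns the old or the new log slot */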
enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

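/*
 * The info block is written twice: the backup copy at info2off first, then
 * the primary copy at infooff, so that a crash mid-update always leaves at
 * least one valid copy on media.
 */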
static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

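/*
 * Map entries encode the Z (zero/trim) and E (error) flags in their top
 * bits. In the on-media layout a 'normal' entry has both bits set, while an
 * entry with both bits clear denotes the initial identity mapping; setting
 * exactly one of the two bits marks an error or a zeroed block.
 */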
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

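/*
 * Read a map entry and decode its flag bits: an all-clear entry is the
 * initial state and maps the LBA to itself, otherwise the stored postmap
 * block is returned along with the trim/error status.
 */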
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the sequence number to
 * find the 'older' entry. The 'old' entry is the one that will be
 * returned, as that is the slot the next log update should overwrite.
 * Sequence numbers cycle through 1->2->3->1, so two valid entries always
 * differ and sum to at most 5; anything else indicates log corruption.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane,
			le32_to_cpu(log.ent[arena->log_index[0]].seq),
			le32_to_cpu(log.ent[arena->log_index[1]].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);

	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

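/*
 * Commit a log entry for a lane and advance that lane's freelist state:
 * flip the sub-slot, bump the cyclic sequence number (1->2->3->1), and
 * make the just-retired 'old' block the next free block for this lane.
 */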
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

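/*
 * If the free block for a lane is known to contain a media error, scrub it
 * by writing zeroes (in at most page-sized chunks), which lets the
 * underlying error-clearing machinery repair the poisoned range.
 */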
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

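/*
 * Rebuild the in-memory freelist from the on-media log: for each lane,
 * read the most recent log entry, seed the next sub-slot and sequence
 * number from it, and replay any map update that the last transaction
 * logged but did not complete.
 */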
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map entry from btt_map_read is stripped of any flag
		 * bits, so use the stripped-out versions from the log as
		 * well when deciding whether recovery is needed. For the
		 * restoration itself, use the 'raw' version of the log
		 * entry, as that captures what was originally going to be
		 * written to the map.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in
 * btt.h for a description of a 'log_group' and its 'slots'), and iterate
 * over its four slots. We expect that a padding slot will be all-zeroes,
 * and use this to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in
 * slots (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume the new padding scheme with entries in slots (0, 1)
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1) and (0, 2)
	 */
	if (!(log_index[0] == 0 &&
			(log_index[1] == 1 || log_index[1] == 2))) {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

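/*
 * The RTT (read tracking table) has one entry per lane; readers publish
 * the postmap block they are reading so that writers never recycle a free
 * block that a reader is still using.
 */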
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

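/*
 * Carve an arena out of 'size' bytes of raw space: reserve two info-block
 * pages and a fixed-size log, then split what remains between the map and
 * the data area such that every internal block has a map entry.
 */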
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}


static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}


static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

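/* per-LBA metadata bytes: whatever of lbasize remains after the payload */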
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies. It
 * validates the input sector, and assigns the corresponding arena and the
 * 'premap' LBA within that arena to the caller-provided pointers.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
			struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The map_locks serialize updates to map entries: a premap LBA hashes (by
 * cache line of its map entry) onto one of 'nfree' locks.
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */
		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

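/*
 * Read path: resolve the sector to a (premap, postmap) pair, publish the
 * postmap block in the RTT, and re-read the map entry to close the race
 * with a writer recycling that block before reading the data.
 */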
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}


/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, we're checking the
 * physical block against the badblocks list, and hence should factor in
 * the initial namespace offset manually.
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

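/*
 * Write path: take the free block for the lane, wait for any readers of
 * that block to drain from the RTT, write the data, then commit the
 * old->new translation via the flog before updating the map entry under
 * the map lock.
 */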
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}


static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}


static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, enum req_op op)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return rc;
}


static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		btt_submit_bio,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	int rc = -ENOMEM;

	btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!btt->btt_disk)
		return -ENOMEM;

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);

	if (btt_meta_size(btt)) {
		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
		if (rc)
			goto out_cleanup_disk;
	}

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

 out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	&struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Return:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			u32 lbasize, uuid_t *uuid,
			struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}


/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

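/*
 * Claim the namespace for BTT use: validate the configuration, size the
 * available capacity, and stand up the block device on top of the arenas.
 */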
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);