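/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 */
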
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		pr_err("block %u key %u/%u: ", set,
		       (unsigned int) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			pr_err("Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned int i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned int ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif
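
/* Keylists */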

int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}
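
/* Pop the top key of keylist by pointing l->top to its previous key */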
struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}
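
/* Pop the bottom key of keylist and update l->top_p */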
void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}
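
/* Key/pointer manipulation */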

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);

	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned int i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned int len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
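
/* Auxiliary search trees */

/* 32 bits total: */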
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned int	exponent:BKEY_EXPONENT_BITS;
	unsigned int	m:BKEY_MID_BITS;
	unsigned int	mantissa:BKEY_MANTISSA_BITS;
} __packed;
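
/*
 * The auxiliary search tree indexes the key space in units of "cachelines"
 * of BSET_CACHELINE bytes; within one such region, lookups finish with a
 * linear search. This doesn't have to match the hardware cacheline size -
 * 128 was chosen because the lookup code touches slightly less memory with
 * it than with 64.
 */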
#define BSET_CACHELINE		128
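
/* Space required for the btree node keys */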
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}
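
/* Space required for the auxiliary search trees */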
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}
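
/* Space required for the prev pointers */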
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}
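
/* Memory allocation */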

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}

int bch_btree_keys_alloc(struct btree_keys *b,
			 unsigned int page_order,
			 gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/*
	 * struct btree_keys is embedded in struct btree, and struct bset_tree
	 * is embedded in struct btree_keys; the remaining fields are assumed
	 * to have been zeroed when the enclosing btree node was allocated, so
	 * nothing else needs initializing here.
	 */
}
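
/* Binary tree stuff for auxiliary search trees */

/*
 * Return the next array index in an in-order traversal of a binary tree
 * that's stored in a linear array (rooted at index 1).
 */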
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}
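
/*
 * Return the previous array index in an in-order traversal of a binary tree
 * that's stored in a linear array (rooted at index 1).
 */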
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}
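
/*
 * Given a binary tree constructed in an array (i.e. how you normally
 * implement a heap), this converts a node in the tree - referenced by array
 * index - to the index it would have if you did an in-order traversal.
 *
 * The binary tree starts at array index 1, not 0.
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */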
static unsigned int __to_inorder(unsigned int j,
				 unsigned int size,
				 unsigned int extra)
{
	unsigned int b = fls(j);
	unsigned int shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}
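
/*
 * Return the cacheline index in bset_tree->data, where j is the index
 * from the linear array which stores the auxiliary binary tree.
 */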
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned int __inorder_to_tree(unsigned int j,
				      unsigned int size,
				      unsigned int extra)
{
	unsigned int shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}
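
/*
 * Return an index into the linear array which stores the auxiliary binary
 * tree, where j is the cacheline index of t->data.
 */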
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned int size = 2;
	     size < 65536000;
	     size++) {
		unsigned int extra =
			(size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			pr_notice("loop %u, %llu per us\n", size,
				  done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif
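
/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->data (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */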

static struct bkey *cacheline_to_bkey(struct bset_tree *t,
				      unsigned int cacheline,
				      unsigned int offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
					     unsigned int cacheline,
					     struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
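
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */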
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}
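
/*
 * Calculate the mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of bkey->high too
 * See make_bfloat() for when the most significant bit of f->exponent
 * is set or not.
 */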
static inline unsigned int bfloat_mantissa(const struct bkey *k,
					   struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);

	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned int j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	/*
	 * If l and r have different KEY_INODE values (different backing
	 * devices), f->exponent records how many least significant bits
	 * differ in the KEY_INODE values and sets the most significant
	 * bits to 1 (always bigger than 64). If l and r have the same
	 * KEY_INODE value, f->exponent records how many bits differ in
	 * the least significant bits of bkey->low. See bfloat_mantissa()
	 * for how the most significant bit of f->exponent is used in
	 * calculating the bfloat mantissa value.
	 */
	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */
	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned int j = roundup(t[-1].size,
					 64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = magic;
	i->version = 0;
	i->keys = 0;

	bch_bset_build_unwritten_tree(b);
}
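
/*
 * Build the auxiliary search tree (the array of struct bkey_float) over the
 * just-written bset; lookups in a written set use this tree rather than the
 * simple lookup table kept for the unwritten set.
 */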
void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned int j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned int,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline) {
			prev = k;
			k = bkey_next(k);
		}

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}
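
/* Insert */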

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned int inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned int shift = bkey_u64s(k);
	unsigned int j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/*
	 * Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] =
				bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}
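
/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge l and r
 */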
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}

unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
				  struct bkey *replace_key)
{
	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;
	struct bkey preceding_key_on_stack = ZERO_KEY;
	struct bkey *preceding_key_p = &preceding_key_on_stack;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	/*
	 * If k has a preceding key, preceding_key_p will be set to the
	 * address of k's preceding key; otherwise preceding_key() sets
	 * it to NULL.
	 */
	if (b->ops->is_extents)
		preceding_key(&START_KEY(k), &preceding_key_p);
	else
		preceding_key(k, &preceding_key_p);

	m = bch_btree_iter_init(b, &iter, preceding_key_p);

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
		prev = m;
		m = bkey_next(m);
	}

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}
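
/* Lookup */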

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned int li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned int m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned int inorder, j, n = 1;

	do {
		unsigned int p = n << 4;

		if (p < t->size)
			prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		if (likely(f->exponent != 127)) {
			if (f->mantissa >= bfloat_mantissa(search, f))
				n = j * 2;
			else
				n = j * 2 + 1;
		} else {
			if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				n = j * 2;
			else
				n = j * 2 + 1;
		}
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there's three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a
	 *    linear search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search over the
	 *    set.
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}
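
/* Btree iterator */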

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set b __maybe_unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, b, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}
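
/* Mergesort */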

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	return mempool_init_page_pool(&state->pool, 1, page_order);
}

static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys\n", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned int start, unsigned int order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(&state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		out->magic = b->set->data->magic;
		out->seq = b->set->data->seq;
		out->version = b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), &state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned int i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0;
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned int crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned int i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}