/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that bch_ptr_invalid() filters out invalid
 * keys is that it also filters out keys of size 0 - these are keys that have
 * been completely overwritten. It'd be safe to delete these in memory while
 * leaving them on disk, just unnecessary work - so we filter them out when
 * resorting instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written
 * at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node
 * are usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables share in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect. (A simplified sketch of this
 * traversal follows this comment.)
 *
 * It's quite useful performance wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in
 * advance when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare
 * against (we don't want to fetch the key from the set, that would defeat the
 * purpose), and a pointer to the key. We use a few tricks to compress both of
 * these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We
 * have a function (to_inorder()) that takes the index of a node in a binary
 * tree and returns what its index would be in an inorder traversal, so we
 * only have to store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true
 * even at the root of the tree.)
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything above bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough
 * bits to partition the key range we're currently checking. Consider key n -
 * the key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 80 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search tree
 * to be of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check whether p and n differed in the bits we needed them to. If they
 * didn't, we flag that node, and when doing lookups we fall back to comparing
 * against the real key. As long as this doesn't happen too often (and it
 * seems to reliably happen a bit less than 1% of the time), we win - even on
 * failures, that key is then more likely to be in cache than if we were doing
 * binary searches all the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a
 * much simpler lookup table - it's just a flat array, so index i in the
 * lookup table corresponds to the i-th range of BSET_CACHELINE bytes in the
 * set. Indexing within each byte range works the same as with the auxiliary
 * search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the
 * pointer to it, only when it would overflow do we go to the trouble of
 * finding the first key in that range of bytes again.
 */
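
/*
 * Illustrative sketch only - not bcache's actual lookup, whose nodes are
 * compressed bkey_floats and whose positions map back through to_inorder().
 * This shows the "binary search tree in an array" idea from the comment
 * above: the tree is rooted at index 1, so a node j's children sit at 2j
 * and 2j + 1, adjacent in memory. Because each level is contiguous,
 * prefetching the children pulls in the next iteration before we need it.
 * The function name and flat uint64_t keys are hypothetical simplifications.
 */
static inline unsigned int example_array_tree_search(const uint64_t *tree,
                             unsigned int size,
                             uint64_t search)
{
    unsigned int j = 1;

    while (j < size) {
        /* Both children are adjacent; one prefetch covers them */
        __builtin_prefetch(&tree[j << 1]);
        /* Go left (2j) if search <= tree[j], right (2j + 1) otherwise */
        j = (j << 1) | (search > tree[j]);
    }

    /*
     * j now encodes the path taken; a real implementation converts it
     * back to a position in the sorted array (cf. to_inorder()).
     */
    return j;
}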

struct btree_keys;
struct btree_iter;
struct btree_iter_set;
struct bkey_float;

#define MAX_BSETS       4U

struct bset_tree {
    /*
     * We construct a binary tree in an array as if the array
     * started at 1, so that things line up on the same cachelines
     * better: see comments in bset.c at cacheline_to_bkey() for
     * details
     */

    /* size of the binary tree and prev array */
    unsigned int        size;

    /* function of size - precalculated for to_inorder() */
    unsigned int        extra;

    /* copy of the last key in the set */
    struct bkey     end;
    struct bkey_float   *tree;

    /*
     * The nodes in the bset tree point to specific keys - this
     * array holds the sizes of the previous key.
     *
     * Conceptually it's a member of struct bkey_float, but we want
     * to keep bkey_float to 4 bytes and prev isn't used in the fast
     * path.
     */
    uint8_t         *prev;

    /* The actual btree node, with pointers to each sorted set */
    struct bset     *data;
};

struct btree_keys_ops {
    bool        (*sort_cmp)(struct btree_iter_set l,
                    struct btree_iter_set r);
    struct bkey *(*sort_fixup)(struct btree_iter *iter,
                       struct bkey *tmp);
    bool        (*insert_fixup)(struct btree_keys *b,
                    struct bkey *insert,
                    struct btree_iter *iter,
                    struct bkey *replace_key);
    bool        (*key_invalid)(struct btree_keys *bk,
                       const struct bkey *k);
    bool        (*key_bad)(struct btree_keys *bk,
                   const struct bkey *k);
    bool        (*key_merge)(struct btree_keys *bk,
                     struct bkey *l, struct bkey *r);
    void        (*key_to_text)(char *buf,
                       size_t size,
                       const struct bkey *k);
    void        (*key_dump)(struct btree_keys *keys,
                    const struct bkey *k);

    /*
     * Only used for deciding whether to use START_KEY(k) or just the key
     * itself in a couple places
     */
    bool        is_extents;
};

struct btree_keys {
    const struct btree_keys_ops *ops;
    uint8_t         page_order;
    uint8_t         nsets;
    unsigned int        last_set_unwritten:1;
    bool            *expensive_debug_checks;

    /*
     * Sets of sorted keys - the real btree node - plus a binary search tree
     *
     * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
     * to the memory we have allocated for this btree node. Additionally,
     * set[0]->data points to the entire btree node as it exists on disk.
     */
    struct bset_tree    set[MAX_BSETS];
};

static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
    return b->set + b->nsets;
}

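/*
 * Only the last set can still be receiving inserts: a set (or a key) counts
 * as "written" once it belongs to a bset that has been written out, i.e.
 * everything before set[nsets] when last_set_unwritten is set, and every
 * set otherwise.
 */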
static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
    return t <= b->set + b->nsets - b->last_set_unwritten;
}

static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
    return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}

static inline unsigned int bset_byte_offset(struct btree_keys *b,
                        struct bset *i)
{
    return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned int bset_sector_offset(struct btree_keys *b,
                          struct bset *i)
{
    return bset_byte_offset(b, i) >> 9;
}

#define __set_bytes(i, k)   (sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)        __set_bytes(i, i->keys)

#define __set_blocks(i, k, block_bytes)             \
    DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes)              \
    __set_blocks(i, (i)->keys, block_bytes)
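
/*
 * Worked example (illustrative): a bset whose key data occupies 24 u64s
 * takes __set_bytes() = sizeof(struct bset) + 24 * 8 bytes; with 4096-byte
 * blocks, set_blocks() rounds that up to a single block.
 */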

static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
    struct bset_tree *t = bset_tree_last(b);

    BUG_ON((PAGE_SIZE << b->page_order) <
           (bset_byte_offset(b, t->data) + set_bytes(t->data)));

    if (!b->last_set_unwritten)
        return 0;

    return ((PAGE_SIZE << b->page_order) -
        (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
        sizeof(u64);
}

static inline struct bset *bset_next_set(struct btree_keys *b,
                     unsigned int block_bytes)
{
    struct bset *i = bset_tree_last(b)->data;

    return ((void *) i) + roundup(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
             gfp_t gfp);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
             bool *expensive_debug_checks);

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_build_written_tree(struct btree_keys *b);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
             struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                  struct bkey *replace_key);

enum {
    BTREE_INSERT_STATUS_NO_INSERT = 0,
    BTREE_INSERT_STATUS_INSERT,
    BTREE_INSERT_STATUS_BACK_MERGE,
    BTREE_INSERT_STATUS_OVERWROTE,
    BTREE_INSERT_STATUS_FRONT_MERGE,
};

/* Btree key iteration */

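/*
 * The iterator merges the (up to MAX_BSETS) sorted sets of a node: data[]
 * holds one [k, end) range per set and is kept ordered as a heap by each
 * range's current key, so bch_btree_iter_next() always yields the smallest
 * remaining key.
 */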
struct btree_iter {
    size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
    struct btree_keys *b;
#endif
    struct btree_iter_set {
        struct bkey *k, *end;
    } data[MAX_BSETS];
};

typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);

struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                    struct btree_keys *b,
                    ptr_filter_fn fn);

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
             struct bkey *end);
struct bkey *bch_btree_iter_init(struct btree_keys *b,
                 struct btree_iter *iter,
                 struct bkey *search);

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                   const struct bkey *search);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree_keys *b,
                       struct bset_tree *t,
                       const struct bkey *search)
{
    return search ? __bch_bset_search(b, t, search) : t->data->start;
}

#define for_each_key_filter(b, k, iter, filter)             \
    for (bch_btree_iter_init((b), (iter), NULL);            \
         ((k) = bch_btree_iter_next_filter((iter), (b), filter));)

#define for_each_key(b, k, iter)                    \
    for (bch_btree_iter_init((b), (iter), NULL);            \
         ((k) = bch_btree_iter_next(iter));)

/* Sorting */

struct bset_sort_state {
    mempool_t       pool;

    unsigned int        page_order;
    unsigned int        crit_factor;

    struct time_stats   time;
};

void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *state,
                 unsigned int page_order);
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
             struct bset_sort_state *state);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                    struct btree_iter *iter,
                    struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
                struct bset_sort_state *state);

static inline void bch_btree_sort(struct btree_keys *b,
                  struct bset_sort_state *state)
{
    bch_btree_sort_partial(b, 0, state);
}

struct bset_stats {
    size_t sets_written, sets_unwritten;
    size_t bytes_written, bytes_unwritten;
    size_t floats, failed;
};

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);

/* Bkey utility code */

#define bset_bkey_last(i)   bkey_idx((struct bkey *) (i)->d, \
                     (unsigned int)(i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
    return bkey_idx(i->start, idx);
}

static inline void bkey_init(struct bkey *k)
{
    *k = ZERO_KEY;
}

static __always_inline int64_t bkey_cmp(const struct bkey *l,
                    const struct bkey *r)
{
    return unlikely(KEY_INODE(l) != KEY_INODE(r))
        ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
        : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
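
/*
 * Keys order by (inode, offset): e.g. KEY(1, 4096, 0) compares greater
 * than KEY(1, 8, 0), and any key with inode 2 compares greater than any
 * key with inode 1, regardless of offset.
 */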

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                  unsigned int i);
bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *where, struct bkey *k);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
    BUG_ON(bkey_cmp(where, k) > 0);
    return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
    BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
    return __bch_cut_back(where, k);
}

/*
 * '*preceding_key_p' points to a memory object used to store the key
 * preceding k. If no preceding key exists, preceding_key() sets
 * '*preceding_key_p' to NULL, so the caller must keep track of the memory
 * '*preceding_key_p' pointed to before the call. Currently the only caller
 * of preceding_key() is bch_btree_insert_key(), which points it at an
 * on-stack variable, so releasing the memory is handled by the stack
 * frame itself.
 */
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
{
    if (KEY_INODE(k) || KEY_OFFSET(k)) {
        (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
        if (!(*preceding_key_p)->low)
            (*preceding_key_p)->high--;
        (*preceding_key_p)->low--;
    } else {
        (*preceding_key_p) = NULL;
    }
}
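
/*
 * Typical use (a minimal sketch, mirroring the on-stack pattern the
 * comment above describes for bch_btree_insert_key()):
 *
 *	struct bkey tmp;
 *	struct bkey *prec = &tmp;
 *
 *	preceding_key(k, &prec);
 *	// prec now points to tmp, holding the key just before k in
 *	// (inode, offset) order - or is NULL if k was the zero key
 */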

static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
    return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
    return b->ops->key_bad(b, k);
}
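
/*
 * Example usage (illustrative; the function name is hypothetical): walk
 * every key in a node, skipping keys bch_ptr_bad() flags - the filter
 * variant of the iterator drops keys for which the filter returns true.
 */
static inline size_t example_count_good_keys(struct btree_keys *b)
{
    struct btree_iter iter;
    struct bkey *k;
    size_t n = 0;

    for_each_key_filter(b, k, &iter, bch_ptr_bad)
        n++;    /* k points to a key that passed the filter */

    return n;
}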

static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
                    size_t size, const struct bkey *k)
{
    return b->ops->key_to_text(buf, size, k);
}

static inline bool bch_bkey_equal_header(const struct bkey *l,
                     const struct bkey *r)
{
    return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
        KEY_PTRS(l) == KEY_PTRS(r) &&
        KEY_CSUM(l) == KEY_CSUM(r));
}

/* Keylists */

struct keylist {
    union {
        struct bkey     *keys;
        uint64_t        *keys_p;
    };
    union {
        struct bkey     *top;
        uint64_t        *top_p;
    };

    /* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE      16
    uint64_t        inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
    l->top_p = l->keys_p = l->inline_keys;
}

static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
{
    l->keys = k;
    l->top = bkey_next(k);
}

static inline void bch_keylist_push(struct keylist *l)
{
    l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
    bkey_copy(l->top, k);
    bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
    return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
    l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
    if (l->keys_p != l->inline_keys)
        kfree(l->keys_p);
}

static inline size_t bch_keylist_nkeys(struct keylist *l)
{
    return l->top_p - l->keys_p;
}

static inline size_t bch_keylist_bytes(struct keylist *l)
{
    return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
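
/*
 * Example usage (illustrative; the function name is hypothetical): build a
 * keylist starting from the inline buffer, append a key, and release any
 * heap allocation a later __bch_keylist_realloc() might have made.
 */
static inline void example_keylist_use(struct bkey *k)
{
    struct keylist l;

    bch_keylist_init(&l);
    bch_keylist_add(&l, k);    /* copies *k and advances l.top */

    /* ... hand the keys in [l.keys, l.top) to the insert path ... */

    bch_keylist_free(&l);    /* no-op while still on inline_keys */
}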

struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);

/* Debug stuff */

#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
                     const char *fmt,
                     ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);

#else

static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
    __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);

#endif

static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
    return *b->expensive_debug_checks;
#else
    return false;
#endif
}

static inline int bch_count_data(struct btree_keys *b)
{
    return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}

#define bch_check_keys(b, ...)                      \
do {                                    \
    if (btree_keys_expensive_checks(b))             \
        __bch_check_keys(b, __VA_ARGS__);           \
} while (0)

#endif