0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Assorted bcache debug code
0004  *
0005  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
0006  * Copyright 2012 Google, Inc.
0007  */
0008 
0009 #include "bcache.h"
0010 #include "btree.h"
0011 #include "debug.h"
0012 #include "extents.h"
0013 
0014 #include <linux/console.h>
0015 #include <linux/debugfs.h>
0016 #include <linux/module.h>
0017 #include <linux/random.h>
0018 #include <linux/seq_file.h>
0019 
0020 struct dentry *bcache_debug;
0021 
0022 #ifdef CONFIG_BCACHE_DEBUG
0023 
/*
 * Walk every bset that was written out for btree node @b, starting at
 * @start (the first bset in the on-disk buffer).  Iteration stops at the
 * end of the node (KEY_SIZE(&b->key) sectors, << 9 for bytes) or at the
 * first bset whose seq no longer matches @start's — i.e. stale data past
 * the last valid write.  Each step advances by the bset's size rounded up
 * to whole blocks.
 */
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
		 block_bytes(b->c->cache))
0030 
/*
 * Re-read btree node @b from disk and compare the result against the
 * in-memory copy.  On mismatch, dump the in-memory bset, the re-read
 * (re-sorted) bset and each on-disk block to the console, then panic
 * with the index of the first differing key.
 *
 * No-op unless the cache set has verification enabled and the scratch
 * buffers (verify_data / verify_ondisk) were allocated.
 */
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	/*
	 * Hold off concurrent I/O on this node, then serialize use of the
	 * shared verify_data/verify_ondisk scratch buffers.
	 */
	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;

	/* Set up the scratch node @v to mirror @b before reading into it. */
	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;

	/* Synchronously read the node's on-disk contents into @sorted. */
	bio = bch_bbio_alloc(b->c);
	bio_set_dev(bio, b->c->cache->bdev);
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio->bi_opf		= REQ_OP_READ | REQ_META;
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);

	/* Keep a raw copy of the on-disk data before node_read_done sorts it. */
	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;

	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) -
		   (void *) inmemory->start)) {
		struct bset *i;
		unsigned int j;

		/* Hold the console so the multi-line dump isn't interleaved. */
		console_lock();

		pr_err("*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		pr_err("*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);

		/* Dump each valid on-disk bset block individually. */
		for_each_written_bset(b, ondisk, i) {
			unsigned int block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c->cache);

			pr_err("*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		/* @i now points one past the last valid bset. */
		pr_err("*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));

		/* Find the first key that differs, for the panic message. */
		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		pr_err("b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}
0107 
/*
 * Verify the data just served for @bio: issue a fresh read of the same
 * sector range into a private bio and memcmp it segment-by-segment
 * against @bio's pages.  A mismatch triggers cache_set_err_on() with the
 * device and starting sector.  Best effort — allocation failures simply
 * skip verification.
 */
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	unsigned int nr_segs = bio_segments(bio);
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };

	check = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!check)
		return;
	/* Clone the geometry of @bio: same device, range, read op. */
	bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
		 REQ_OP_READ);
	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
	check->bi_iter.bi_size = bio->bi_iter.bi_size;

	bch_bio_map(check, NULL);
	if (bch_bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);

	/*
	 * Walk both bios in lockstep; citer.bi_size is primed with UINT_MAX
	 * so bio_iter_iovec() never thinks the check bio is exhausted.
	 */
	citer.bi_size = UINT_MAX;
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = bvec_kmap_local(&bv);
		void *p2;

		cbv = bio_iter_iovec(check, citer);
		p2 = bvec_kmap_local(&cbv);

		cache_set_err_on(memcmp(p1, p2, bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %pg sector %llu",
				 dc->bdev,
				 (uint64_t) bio->bi_iter.bi_sector);

		/* Unmap in reverse order of mapping (kmap_local is a stack). */
		kunmap_local(p2);
		kunmap_local(p1);
		bio_advance_iter(check, &citer, bv.bv_len);
	}

	bio_free_pages(check);
out_put:
	bio_uninit(check);
	kfree(check);
}
0153 
0154 #endif
0155 
0156 #ifdef CONFIG_DEBUG_FS
0157 
0158 /* XXX: cache set refcounting */
0159 
/* Per-open state for the debugfs key dump (see bch_dump_open/read). */
struct dump_iterator {
	char			buf[PAGE_SIZE];	/* formatted text not yet copied to userspace */
	size_t			bytes;		/* valid bytes currently in buf */
	struct cache_set	*c;		/* cache set being dumped */
	struct keybuf		keys;		/* scan cursor over the btree */
};
0166 
/* Keybuf scan predicate for the debugfs dump: every key matches. */
static bool dump_pred(struct keybuf *unused_buf, struct bkey *unused_key)
{
	return true;
}
0171 
/*
 * debugfs read: stream the cache set's extents to userspace, one
 * formatted key per line.  Leftover text from a line that didn't fit in
 * the user buffer is kept in i->buf across calls; each iteration drains
 * that buffer first, then rescans for the next key and formats it.
 * Returns bytes copied, or -EFAULT on copy_to_user failure.
 */
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned int bytes = min(i->bytes, size);

		/* Drain any text buffered from a previous iteration/call. */
		if (copy_to_user(buf, i->buf, bytes))
			return -EFAULT;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		/* Shift the unconsumed remainder to the front of i->buf. */
		memmove(i->buf, i->buf + bytes, i->bytes);

		/* User buffer filled before the line was fully copied. */
		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}
0206 
0207 static int bch_dump_open(struct inode *inode, struct file *file)
0208 {
0209     struct cache_set *c = inode->i_private;
0210     struct dump_iterator *i;
0211 
0212     i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
0213     if (!i)
0214         return -ENOMEM;
0215 
0216     file->private_data = i;
0217     i->c = c;
0218     bch_keybuf_init(&i->keys);
0219     i->keys.last_scanned = KEY(0, 0, 0);
0220 
0221     return 0;
0222 }
0223 
0224 static int bch_dump_release(struct inode *inode, struct file *file)
0225 {
0226     kfree(file->private_data);
0227     return 0;
0228 }
0229 
/* File operations for the per-cache-set debugfs dump file. */
static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};
0236 
0237 void bch_debug_init_cache_set(struct cache_set *c)
0238 {
0239     if (!IS_ERR_OR_NULL(bcache_debug)) {
0240         char name[50];
0241 
0242         snprintf(name, 50, "bcache-%pU", c->set_uuid);
0243         c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
0244                            &cache_set_debug_ops);
0245     }
0246 }
0247 
0248 #endif
0249 
/* Module teardown: remove the bcache debugfs directory and its files. */
void bch_debug_exit(void)
{
	debugfs_remove_recursive(bcache_debug);
}
0254 
/* Module init: create the top-level "bcache" debugfs directory. */
void __init bch_debug_init(void)
{
	/*
	 * It is unnecessary to check the return value of
	 * debugfs_create_dir(); users are not supposed to care.
	 * bcache_debug is checked with IS_ERR_OR_NULL() before use.
	 */
	bcache_debug = debugfs_create_dir("bcache", NULL);
}