/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

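/*
 * Tracepoints for the bcache block cache.  TRACE_SYSTEM is "bcache", so
 * these events appear under events/bcache/ in tracefs and can be enabled
 * individually, e.g. (typical tracefs paths; they vary by configuration):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/bcache_read/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */
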
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(sector_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->orig_major = d->disk->major;
		__entry->orig_minor = d->disk->first_minor;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

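/* Key events: a bkey's inode:offset, its length, and its dirty bit. */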
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

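/* Events that identify a btree node by the cache bucket backing it. */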
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

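/* request.c */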

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

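/*
 * A read from a bcache device: "hit" reports whether the data was found
 * in the cache, "bypass" whether the request skipped the cache entirely.
 */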
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

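/*
 * A write to a bcache device: "writeback" reports whether the write is
 * being held for background writeback, "bypass" whether it skipped the
 * cache entirely.
 */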
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
		__entry->inode = inode;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

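/* Journal */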

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(u32, nr_keys)
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		__entry->nr_keys = keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);

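/* Btree */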

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

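/*
 * One event per key inserted into a btree node: which node and level took
 * the key, plus the caller's op and the resulting insert status code.
 */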
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found = nr_found;
		__entry->start_inode = start_inode;
		__entry->start_offset = start_offset;
		__entry->end_inode = end_inode;
		__entry->end_offset = end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

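/* Allocator */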

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->free = fifo_used(&ca->free[reserve]);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

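/* Background writeback */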

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

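/* This part must be outside protection */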
#include <trace/define_trace.h>