/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
#include <linux/blk_types.h>

#if defined(CONFIG_BLK_DEV_IO_TRACE)

#include <linux/sysfs.h>

struct blk_trace {
	int trace_state;
	struct rchan *rchan;
	unsigned long __percpu *sequence;
	unsigned char __percpu *msg_data;
	u16 act_mask;
	u64 start_lba;
	u64 end_lba;
	u32 pid;
	u32 dev;
	struct dentry *dir;
	struct list_head running_list;
	atomic_t dropped;
};

extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...);

/**
 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
 * @q:		queue the I/O is for
 * @fmt:	format to print the message in
 *
 * The variadic arguments supply the values for @fmt.  The cgroup-aware
 * variant, blk_add_cgroup_trace_msg(), additionally associates the message
 * with the given cgroup_subsys_state.
 */
#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
	do { \
		struct blk_trace *bt; \
		\
		rcu_read_lock(); \
		bt = rcu_dereference((q)->blk_trace); \
		if (unlikely(bt)) \
			__blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
		rcu_read_unlock(); \
	} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
#define BLK_TN_MAX_MSG	128

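/*
 * True if a blktrace is attached to @q and notify (BLK_TC_NOTIFY) events are
 * enabled in its action mask.  Callers typically check this before doing any
 * work to format a message, e.g. (illustrative only):
 *
 *	if (blk_trace_note_message_enabled(q))
 *		blk_add_trace_msg(q, "%s: throttled", dev_name);
 */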
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
	struct blk_trace *bt;
	bool ret;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
	rcu_read_unlock();
	return ret;
}

extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   struct block_device *bdev,
			   char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);

#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
# define blk_trace_shutdown(q)				do { } while (0)
# define blk_add_driver_data(rq, data, len)		do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
# define blk_trace_startstop(q, start)			(-ENOTTY)
# define blk_trace_remove(q)				(-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
# define blk_trace_note_message_enabled(q)		(false)
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_COMPAT

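/*
 * 32-bit layout of struct blk_user_trace_setup, used to service the
 * BLKTRACESETUP ioctl from 32-bit userspace via BLKTRACESETUP32 below.
 */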
struct compat_blk_user_trace_setup {
	char name[BLKTRACE_BDEV_SIZE];
	u16 act_mask;
	u32 buf_size;
	u32 buf_nr;
	compat_u64 start_lba;
	compat_u64 end_lba;
	u32 pid;
};
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)

#endif

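/*
 * Decode the operation and flags in @opf into the human-readable "rwbs"
 * string (e.g. "WS" for a synchronous write) used by the block tracepoints.
 */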
void blk_fill_rwbs(char *rwbs, blk_opf_t opf);

static inline sector_t blk_rq_trace_sector(struct request *rq)
{
	/*
	 * Tracing should ignore the starting sector for passthrough requests
	 * and for requests where the starting sector was never set.
	 */
	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
		return 0;
	return blk_rq_pos(rq);
}

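/* A sector count is likewise meaningless for passthrough requests. */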
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
{
	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
}

#endif