// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

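/*
 * Slab cache and mempool backing the per-bio iostat contexts used for the
 * latency statistics below; the pool keeps NUM_PREALLOC_IOSTAT_CTXS objects
 * preallocated so allocations in the bio submission path do not fail.
 */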
#define NUM_PREALLOC_IOSTAT_CTXS	128
static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;

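/*
 * seq_file show handler that dumps the cumulative per-type read/write byte
 * counters; it prints nothing unless iostat has been enabled.
 */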
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	time64_t now = ktime_get_real_seconds();

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time: %-16llu\n", now);

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	seq_printf(seq, "app buffered: %-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_IO]);
	seq_printf(seq, "app direct: %-16llu\n",
				sbi->rw_iostat[APP_DIRECT_IO]);
	seq_printf(seq, "app mapped: %-16llu\n",
				sbi->rw_iostat[APP_MAPPED_IO]);

	/* print fs write IOs */
	seq_printf(seq, "fs data: %-16llu\n",
				sbi->rw_iostat[FS_DATA_IO]);
	seq_printf(seq, "fs node: %-16llu\n",
				sbi->rw_iostat[FS_NODE_IO]);
	seq_printf(seq, "fs meta: %-16llu\n",
				sbi->rw_iostat[FS_META_IO]);
	seq_printf(seq, "fs gc data: %-16llu\n",
				sbi->rw_iostat[FS_GC_DATA_IO]);
	seq_printf(seq, "fs gc node: %-16llu\n",
				sbi->rw_iostat[FS_GC_NODE_IO]);
	seq_printf(seq, "fs cp data: %-16llu\n",
				sbi->rw_iostat[FS_CP_DATA_IO]);
	seq_printf(seq, "fs cp node: %-16llu\n",
				sbi->rw_iostat[FS_CP_NODE_IO]);
	seq_printf(seq, "fs cp meta: %-16llu\n",
				sbi->rw_iostat[FS_CP_META_IO]);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	seq_printf(seq, "app buffered: %-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_READ_IO]);
	seq_printf(seq, "app direct: %-16llu\n",
				sbi->rw_iostat[APP_DIRECT_READ_IO]);
	seq_printf(seq, "app mapped: %-16llu\n",
				sbi->rw_iostat[APP_MAPPED_READ_IO]);

	/* print fs read IOs */
	seq_printf(seq, "fs data: %-16llu\n",
				sbi->rw_iostat[FS_DATA_READ_IO]);
	seq_printf(seq, "fs gc data: %-16llu\n",
				sbi->rw_iostat[FS_GDATA_READ_IO]);
	seq_printf(seq, "fs compr_data: %-16llu\n",
				sbi->rw_iostat[FS_CDATA_READ_IO]);
	seq_printf(seq, "fs node: %-16llu\n",
				sbi->rw_iostat[FS_NODE_READ_IO]);
	seq_printf(seq, "fs meta: %-16llu\n",
				sbi->rw_iostat[FS_META_READ_IO]);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	seq_printf(seq, "fs discard: %-16llu\n",
				sbi->rw_iostat[FS_DISCARD]);

	return 0;
}

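/*
 * Take a snapshot of the accumulated latency figures (I/O count, average
 * and peak latency converted from jiffies to milliseconds), reset the
 * accumulators under iostat_lat_lock, and pass the snapshot to the
 * f2fs_iostat_latency tracepoint.
 */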
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx = 0;
	unsigned int cnt;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].peak_lat =
				jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = cnt;
			iostat_lat[idx][io].avg_lat = cnt ?
				jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}

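/*
 * Once per iostat_period_ms, compute how many bytes of each I/O type were
 * issued since the previous period, emit the deltas through the f2fs_iostat
 * tracepoint, and then flush the latency statistics as well.
 */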
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;
	unsigned long flags;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need double check under the lock */
	spin_lock_irqsave(&sbi->iostat_lock, flags);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
		return;
	}
	sbi->iostat_next_period = jiffies +
				msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->rw_iostat[i] -
				sbi->prev_rw_iostat[i];
		sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
	}
	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}

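/*
 * Clear the cumulative and previous-period byte counters and wipe the
 * latency accumulators.
 */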
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock_irq(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock_irq(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}

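/*
 * Account io_bytes against the given iostat type; application buffered and
 * direct I/O are additionally folded into the aggregate APP_WRITE_IO and
 * APP_READ_IO counters. Finally give the periodic reporting a chance to run.
 */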
void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	spin_lock_irqsave(&sbi->iostat_lock, flags);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_WRITE_IO] += io_bytes;

	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_READ_IO] += io_bytes;

	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	f2fs_record_iostat(sbi);
}

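/*
 * Fold one completed bio into the latency accumulators: the elapsed time
 * since submit_ts is added to the sum and compared against the peak for the
 * (READ / WRITE_SYNC / WRITE_ASYNC, page type) bucket. META_FLUSH bios are
 * counted as META.
 */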
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				int rw, bool is_sync)
{
	unsigned long ts_diff;
	unsigned int iotype = iostat_ctx->type;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int idx;
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (iotype >= META_FLUSH)
		iotype = META;

	if (rw == 0) {
		idx = READ_IO;
	} else {
		if (is_sync)
			idx = WRITE_SYNC_IO;
		else
			idx = WRITE_ASYNC_IO;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[idx][iotype] += ts_diff;
	io_lat->bio_cnt[idx][iotype]++;
	if (ts_diff > io_lat->peak_lat[idx][iotype])
		io_lat->peak_lat[idx][iotype] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}

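/*
 * Called from bio completion: put back the bi_private value the rest of the
 * completion path expects (the post-read context for reads, the sbi for
 * writes), record the bio's latency and free the iostat context.
 */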
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	bool is_sync = bio->bi_opf & REQ_SYNC;

	if (rw == 0)
		bio->bi_private = iostat_ctx->post_read_ctx;
	else
		bio->bi_private = iostat_ctx->sbi;
	__update_iostat_latency(iostat_ctx, rw, is_sync);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}

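/*
 * Called at bio allocation time: take an iostat context from the mempool,
 * remember the original post-read context in it and install it as the bio's
 * bi_private so the completion path can find it. The submission timestamp
 * and type are zeroed here and set later by the submission path.
 */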
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;

	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}

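/* Module init: create the slab cache and mempool for bio iostat contexts. */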
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
		sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
			bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

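/* Module exit: tear down the mempool and slab cache. */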
void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}

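/*
 * Per-superblock setup: initialize the iostat locks, set the default
 * reporting period and allocate the latency accumulator table.
 */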
int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

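/* Per-superblock teardown: free the latency accumulator table. */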
void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}