0001
0002
0003
0004
0005
0006 #ifndef _BLK_CGROUP_RWSTAT_H
0007 #define _BLK_CGROUP_RWSTAT_H
0008
0009 #include "blk-cgroup.h"
0010
/*
 * Counter indices for a blkg_rwstat.  Each I/O is accounted twice by
 * blkg_rwstat_add(): once by direction (READ / WRITE / DISCARD) and once
 * by sync-ness (SYNC / ASYNC).  The enumerator order defines the array
 * layout of blkg_rwstat::cpu_cnt / aux_cnt — do not reorder.
 */
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,	/* reads (neither write nor discard) */
	BLKG_RWSTAT_WRITE,	/* writes */
	BLKG_RWSTAT_SYNC,	/* synchronous ops */
	BLKG_RWSTAT_ASYNC,	/* asynchronous ops */
	BLKG_RWSTAT_DISCARD,	/* discards */

	BLKG_RWSTAT_NR,		/* number of real counters */
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,	/* pseudo-index for the aggregate */
};
0021
0022
0023
0024
0025
/*
 * A read/write statistics counter.  cpu_cnt[] holds the live per-cpu
 * counts; aux_cnt[] holds an extra atomic contribution that readers add
 * on top (see blkg_rwstat_read_counter() and blkg_rwstat_add_aux()).
 */
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};
0030
/* A plain snapshot of the BLKG_RWSTAT_NR counters of a blkg_rwstat. */
struct blkg_rwstat_sample {
	u64				cnt[BLKG_RWSTAT_NR];
};
0034
0035 static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
0036 unsigned int idx)
0037 {
0038 return atomic64_read(&rwstat->aux_cnt[idx]) +
0039 percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
0040 }
0041
/* Allocate/free the per-cpu counters backing @rwstat. */
int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
/* prfill helpers for printing a blkg_rwstat via cgroup seq_files. */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
/* Sum the rwstat at @off of @pol's pd over @blkg and its descendants. */
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
			       int off, struct blkg_rwstat_sample *sum);
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
0062 blk_opf_t opf, uint64_t val)
0063 {
0064 struct percpu_counter *cnt;
0065
0066 if (op_is_discard(opf))
0067 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
0068 else if (op_is_write(opf))
0069 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
0070 else
0071 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
0072
0073 percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
0074
0075 if (op_is_sync(opf))
0076 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
0077 else
0078 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
0079
0080 percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
0081 }
0082
0083
0084
0085
0086
0087
0088
0089 static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
0090 struct blkg_rwstat_sample *result)
0091 {
0092 int i;
0093
0094 for (i = 0; i < BLKG_RWSTAT_NR; i++)
0095 result->cnt[i] =
0096 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
0097 }
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107 static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
0108 {
0109 struct blkg_rwstat_sample tmp = { };
0110
0111 blkg_rwstat_read(rwstat, &tmp);
0112 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
0113 }
0114
0115
0116
0117
0118
0119 static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
0120 {
0121 int i;
0122
0123 for (i = 0; i < BLKG_RWSTAT_NR; i++) {
0124 percpu_counter_set(&rwstat->cpu_cnt[i], 0);
0125 atomic64_set(&rwstat->aux_cnt[i], 0);
0126 }
0127 }
0128
0129
0130
0131
0132
0133
0134
0135
/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count (per-cpu sums plus its own aux counts) into @to's
 * aux counts.  @from is left unmodified.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	/* snapshot all the per-cpu sums first, then fold them into @to;
	 * NOTE(review): the two-pass structure looks intentional w.r.t.
	 * concurrent updaters of @from — do not merge the loops */
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
0149 #endif