/* SPDX-License-Identifier: GPL-2.0
 *
 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
 * Do not use in new code.
 */
#ifndef _BLK_CGROUP_RWSTAT_H
#define _BLK_CGROUP_RWSTAT_H

#include "blk-cgroup.h"

enum blkg_rwstat_type {
    BLKG_RWSTAT_READ,
    BLKG_RWSTAT_WRITE,
    BLKG_RWSTAT_SYNC,
    BLKG_RWSTAT_ASYNC,
    BLKG_RWSTAT_DISCARD,

    BLKG_RWSTAT_NR,
    BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
    struct percpu_counter       cpu_cnt[BLKG_RWSTAT_NR];
    atomic64_t          aux_cnt[BLKG_RWSTAT_NR];
};

struct blkg_rwstat_sample {
    u64             cnt[BLKG_RWSTAT_NR];
};

static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
        unsigned int idx)
{
    return atomic64_read(&rwstat->aux_cnt[idx]) +
        percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}

int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
             const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
               int off);
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
        int off, struct blkg_rwstat_sample *sum);
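
/*
 * Usage sketch (illustrative, not part of this header): a blkcg policy
 * that still relies on these legacy helpers typically embeds a
 * blkg_rwstat in its per-blkg private data and pairs blkg_rwstat_init()
 * with blkg_rwstat_exit() in its pd_alloc_fn()/pd_free_fn() callbacks.
 * The struct and field names below are hypothetical.
 *
 *      struct example_blkg_pd {
 *          struct blkg_policy_data pd;
 *          struct blkg_rwstat bytes;
 *      };
 *
 *      // in pd_alloc_fn(): set up the percpu counters, may fail
 *      if (blkg_rwstat_init(&epd->bytes, gfp))
 *          return NULL;
 *
 *      // in pd_free_fn(): release the percpu counters
 *      blkg_rwstat_exit(&epd->bytes);
 */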


/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @opf: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @opf.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                   blk_opf_t opf, uint64_t val)
{
    struct percpu_counter *cnt;

    if (op_is_discard(opf))
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
    else if (op_is_write(opf))
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
    else
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

    percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

    if (op_is_sync(opf))
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
    else
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

    percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
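
/*
 * Example call site (illustrative): accounting a bio against the stat
 * from the earlier sketch.  "epd" is the hypothetical per-blkg data;
 * bi_opf carries the REQ_OP and flags that select the counters.
 *
 *      blkg_rwstat_add(&epd->bytes, bio->bi_opf, bio->bi_iter.bi_size);
 */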

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where the current snapshot is stored
 *
 * Read the current snapshot of @rwstat into @result.  The aux counts
 * are not included.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
        struct blkg_rwstat_sample *result)
{
    int i;

    for (i = 0; i < BLKG_RWSTAT_NR; i++)
        result->cnt[i] =
            percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}
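
/*
 * Example (illustrative): taking a local snapshot and deriving the
 * bytes transferred in each direction.  Only the percpu counters are
 * read here; totals that include dead children would come from
 * blkg_rwstat_recursive_sum() instead.
 *
 *      struct blkg_rwstat_sample snap = { };
 *      u64 rbytes, wbytes;
 *
 *      blkg_rwstat_read(&epd->bytes, &snap);
 *      rbytes = snap.cnt[BLKG_RWSTAT_READ];
 *      wbytes = snap.cnt[BLKG_RWSTAT_WRITE];
 */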

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat, i.e. the sum of its read and write
 * counts.  This function can be called without synchronization and takes
 * care of u64 atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
    struct blkg_rwstat_sample tmp = { };

    blkg_rwstat_read(rwstat, &tmp);
    return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
    int i;

    for (i = 0; i < BLKG_RWSTAT_NR; i++) {
        percpu_counter_set(&rwstat->cpu_cnt[i], 0);
        atomic64_set(&rwstat->aux_cnt[i], 0);
    }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                       struct blkg_rwstat *from)
{
    u64 sum[BLKG_RWSTAT_NR];
    int i;

    for (i = 0; i < BLKG_RWSTAT_NR; i++)
        sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

    for (i = 0; i < BLKG_RWSTAT_NR; i++)
        atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
                 &to->aux_cnt[i]);
}
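
/*
 * Example (illustrative): when a blkg goes offline, a policy can fold
 * its counts into the parent's aux counters so that recursive sums keep
 * reflecting the dead child.  "parent_epd" is hypothetical.
 *
 *      blkg_rwstat_add_aux(&parent_epd->bytes, &epd->bytes);
 */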
#endif  /* _BLK_CGROUP_RWSTAT_H */