#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

#include "blk-cgroup-rwstat.h"

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};
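
/*
 * Illustrative sketch, not part of this header's API: queueing a bio onto
 * a qnode follows the pattern described in the comment above.  The real
 * helpers live in blk-throttle.c (throtl_qnode_add_bio() and friends);
 * the function name below is hypothetical and exists only to show the
 * shape of the operation.
 */
static inline void example_qnode_add_bio(struct bio *bio,
					 struct throtl_qnode *qn,
					 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		/*
		 * First bio on this qnode: link it onto the service
		 * queue's list.  The real code also takes a reference on
		 * qn->tg's blkg at this point to pin the tree.
		 */
		list_add_tail(&qn->node, queued);
	}
}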

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
	THROTL_TG_HAS_IOPS_LIMIT = 1 << 2,	/* tg has iops limit */
	THROTL_TG_CANCELING	= 1 << 3,	/* starts to cancel bio */
};

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* number of bios dispatched in the current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */

	/* when did we start a new slice? */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;

	struct blkg_rwstat stat_bytes;
	struct blkg_rwstat stat_ios;
};
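
/*
 * Illustrative sketch, assuming only what the structures above define:
 * because pending_tree is keyed by tg->disptime, the group eligible to
 * dispatch soonest is the cached leftmost node.  throtl_rb_first() in
 * blk-throttle.c is the real helper; the name below is hypothetical.
 */
static inline struct throtl_grp *
example_first_pending_tg(struct throtl_service_queue *sq)
{
	/* rb_first_cached() is O(1) thanks to the cached leftmost node */
	struct rb_node *n = rb_first_cached(&sq->pending_tree);

	return n ? rb_entry(n, struct throtl_grp, rb_node) : NULL;
}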

extern struct blkcg_policy blkcg_policy_throtl;

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
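
/*
 * Illustrative sketch: the [2][LIMIT_CNT] limit arrays are indexed first
 * by direction (READ/WRITE, from bio_data_dir()) and then by limit level.
 * The helper name is hypothetical; tg_bps_limit() in blk-throttle.c is
 * the real accessor and also accounts for hierarchy and low-limit mode.
 */
static inline uint64_t example_tg_bps_max(struct throtl_grp *tg, int rw)
{
	/* U64_MAX in this slot means "no bps limit configured" */
	return tg->bps[rw][LIMIT_MAX];
}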

/*
 * Internal throttling interface.
 */
#ifndef CONFIG_BLK_DEV_THROTTLING
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct request_queue *q) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
int blk_throtl_init(struct request_queue *q);
void blk_throtl_exit(struct request_queue *q);
void blk_throtl_register_queue(struct request_queue *q);
bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct request_queue *q);
static inline bool blk_throtl_bio(struct bio *bio)
{
	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);

	/* no need to throttle bps any more if the bio has been throttled */
	if (bio_flagged(bio, BIO_THROTTLED) &&
	    !(tg->flags & THROTL_TG_HAS_IOPS_LIMIT))
		return false;

	if (!tg->has_rules[bio_data_dir(bio)])
		return false;

	return __blk_throtl_bio(bio);
}
#endif /* CONFIG_BLK_DEV_THROTTLING */
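
/*
 * Illustrative sketch of the intended calling convention, not a real call
 * site (the kernel invokes blk_throtl_bio() from the core bio submission
 * path): a true return means the throttler has taken ownership of the bio
 * and queued it for later dispatch, so the caller must stop issuing it.
 * The function name below is hypothetical.
 */
static inline bool example_throttle_on_submit(struct bio *bio)
{
	if (blk_throtl_bio(bio))
		return true;	/* queued by the throttler; stop here */

	/* otherwise the caller continues issuing the bio */
	return false;
}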

#endif /* BLK_THROTTLE_H */