#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

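/*
 * The QoS policies that can be attached to a request_queue; at most one
 * instance per id may be registered at a time (see rq_qos_add()):
 *
 *  RQ_QOS_WBT:     writeback throttling (blk-wbt)
 *  RQ_QOS_LATENCY: cgroup io.latency controller (blk-iolatency)
 *  RQ_QOS_COST:    cgroup io.cost controller (blk-iocost)
 *  RQ_QOS_IOPRIO:  cgroup I/O priority policy (blk-ioprio)
 */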
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};

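/* A wait queue plus the in-flight counter it guards; see rq_qos_wait(). */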
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

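/*
 * One attached QoS policy.  The policies of a queue form a singly linked
 * list headed at q->rq_qos and chained through ->next.
 */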
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

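/*
 * Per-policy hooks, invoked from the block layer at the corresponding
 * points in the life of a bio/request (see the rq_qos_*() wrappers below).
 * Hooks a policy does not need may be left NULL.
 */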
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

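/*
 * Queue-depth scaling state used by policies that throttle by limiting the
 * number of in-flight requests; max_depth is the currently allowed depth,
 * derived from scale_step and queue_depth/default_depth.
 */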
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

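/* Return the policy with the given id attached to @q, or NULL. */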
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * Freeze the queue so no IO is in flight while the new policy is
	 * linked in; ->queue_lock protects against concurrent add/delete.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	if (rq_qos_id(q, rqos->id))
		goto ebusy;
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;
ebusy:
	spin_unlock_irq(&q->queue_lock);
	blk_mq_unfreeze_queue(q);
	return -EBUSY;
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing the queue and using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}

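/*
 * Callbacks for rq_qos_wait(): acquire_inflight_cb attempts to take an
 * inflight slot on @rqw for the waiter, cleanup_cb releases a slot that
 * turned out not to be needed.  Both are passed the caller's private_data.
 */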
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

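/*
 * Walk q->rq_qos and invoke the corresponding hook on every attached
 * policy.  Callers normally use the rq_qos_*() wrappers below, which skip
 * the call entirely when no policy is attached.
 */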
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

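/*
 * Only bios that were flagged by rq_qos_throttle() or rq_qos_merge() need
 * to notify the QoS policies on completion; anything else skips the queue
 * lookup entirely.
 */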
static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

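/*
 * Called ahead of bio submission; BIO_QOS_THROTTLED marks the bio so that
 * rq_qos_done_bio() knows the policies must be notified when it completes.
 */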
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif