0001
0002 #ifndef _BLK_CGROUP_PRIVATE_H
0003 #define _BLK_CGROUP_PRIVATE_H
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/blk-cgroup.h>
0018 #include <linux/cgroup.h>
0019 #include <linux/kthread.h>
0020 #include <linux/blk-mq.h>
0021
0022 struct blkcg_gq;
0023 struct blkg_policy_data;
0024
0025
0026
0027 #define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
0028
0029 #ifdef CONFIG_BLK_CGROUP
0030
/* per-blkg I/O statistics are kept per operation direction */
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,		/* number of stat slots, keep last */
};
0038
/* one byte and one I/O counter per blkg_iostat_type direction */
struct blkg_iostat {
	u64 bytes[BLKG_IOSTAT_NR];
	u64 ios[BLKG_IOSTAT_NR];
};
0043
/*
 * An iostat snapshot protected by a u64_stats_sync seqlock.  @cur is the
 * running total; @last presumably holds the value at the previous flush so
 * deltas can be propagated — TODO confirm against the .c flush code.
 */
struct blkg_iostat_set {
	struct u64_stats_sync sync;
	struct blkg_iostat cur;
	struct blkg_iostat last;
};
0049
0050
/*
 * A blkcg_gq ("blkg") is the association between a block cgroup and a
 * request_queue — one instance per (blkcg, request_queue) pair.  Looked up
 * under RCU (see __blkg_lookup() below) and freed via @rcu_head.
 */
struct blkcg_gq {
	/* the queue this blkg is associated with */
	struct request_queue *q;
	struct list_head q_node;	/* entry in a per-queue list */
	struct hlist_node blkcg_node;	/* entry in blkcg->blkg_list */
	struct blkcg *blkcg;		/* owning cgroup */

	/* parent blkg — NULL presumably only for the root blkg */
	struct blkcg_gq *parent;

	/* reference count; blkg_get/blkg_put/blkg_tryget below */
	struct percpu_ref refcnt;

	/* whether this blkg is currently online */
	bool online;

	/* per-cpu stat deltas plus the aggregated per-blkg totals */
	struct blkg_iostat_set __percpu *iostat_cpu;
	struct blkg_iostat_set iostat;

	/* per-policy private data, indexed by policy id (plid) */
	struct blkg_policy_data *pd[BLKCG_MAX_POLS];

	/* bios punted to a kthread; list protected by async_bio_lock */
	spinlock_t async_bio_lock;
	struct bio_list async_bios;
	/*
	 * The two work items are mutually exclusive in time (async bio
	 * submission while live, freeing afterwards), hence the union.
	 */
	union {
		struct work_struct async_bio_work;
		struct work_struct free_work;
	};

	/* throttling/delay bookkeeping — see blkcg_{use,unuse,set,clear}_delay */
	atomic_t use_delay;
	atomic64_t delay_nsec;
	atomic64_t delay_start;
	u64 last_delay;
	int last_use;

	struct rcu_head rcu_head;	/* deferred free */
};
0087
/* per-cgroup state for the io controller; embeds the css */
struct blkcg {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	refcount_t online_pin;		/* pins the cgroup online */

	/* blkgs of this cgroup, indexed by queue; hint caches last lookup */
	struct radix_tree_root blkg_tree;
	struct blkcg_gq __rcu *blkg_hint;
	struct hlist_head blkg_list;

	/* per-policy cgroup-wide private data, indexed by plid */
	struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

	/* entry in the global list of all blkcgs */
	struct list_head all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char fc_app_id[FC_APPID_LEN];	/* FC application identifier */
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;	/* cgroup writeback domains */
#endif
};
0107
0108 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
0109 {
0110 return css ? container_of(css, struct blkcg, css) : NULL;
0111 }
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
/*
 * Per-(blkg, policy) private data.  Policies embed this as the first
 * member of their own pd structure and convert with pd_to_blkg()/blkg_to_pd().
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;
};
0129
0130
0131
0132
0133
0134
0135
0136
/*
 * Per-(blkcg, policy) private data, the cgroup-wide counterpart of
 * blkg_policy_data.  Convert with cpd_to_blkcg()/blkcg_to_cpd().
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg *blkcg;
	int plid;
};
0142
/*
 * Method types for struct blkcg_policy below.  cpd_* methods operate on
 * cgroup-wide blkcg_policy_data, pd_* methods on per-blkg blkg_policy_data.
 */
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);
0156
/*
 * A blkcg policy (e.g. an io scheduler or throttler) registered with
 * blkcg_policy_register().  All operations are optional unless enforced
 * elsewhere — NULL checks are presumably done by the callers.
 */
struct blkcg_policy {
	int plid;			/* policy id, index into pd[]/cpd[] */

	/* cgroup files for the policy, default and legacy hierarchies */
	struct cftype *dfl_cftypes;
	struct cftype *legacy_cftypes;

	/* operations on cgroup-wide policy data */
	blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
	blkcg_pol_init_cpd_fn *cpd_init_fn;
	blkcg_pol_free_cpd_fn *cpd_free_fn;
	blkcg_pol_bind_cpd_fn *cpd_bind_fn;

	/* operations on per-blkg policy data */
	blkcg_pol_alloc_pd_fn *pd_alloc_fn;
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_free_pd_fn *pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn *pd_stat_fn;
};
0177
/* the root blkcg and the debug-stats knob, defined in blk-cgroup.c */
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

/* lookup slow path and per-queue init/teardown */
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* policy registration and per-queue (de)activation */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

/* helpers for printing per-blkg stats to cgroup files */
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
0201
/*
 * Context for parsing per-device cgroup configuration input.  Filled in by
 * blkg_conf_prep() and released with blkg_conf_finish().
 */
struct blkg_conf_ctx {
	struct block_device *bdev;	/* target device */
	struct blkcg_gq *blkg;		/* blkg for (blkcg, bdev's queue) */
	char *body;			/* remainder of the input string */
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224 static inline bool bio_issue_as_root_blkg(struct bio *bio)
0225 {
0226 return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
0227 }
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
/*
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether the slow path may update @blkcg->blkg_hint
 *
 * Fast path: the root blkcg resolves directly to @q->root_blkg, and an
 * RCU-cached hint is tried before falling back to blkg_lookup_slowpath().
 * Caller is presumably responsible for holding rcu_read_lock() — the
 * WARN lives in blkg_lookup() below.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	/* try the cached last-lookup hint first */
	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
0265 struct request_queue *q)
0266 {
0267 WARN_ON_ONCE(!rcu_read_lock_held());
0268 return __blkg_lookup(blkcg, q, false);
0269 }
0270
0271
0272
0273
0274
0275
0276
/*
 * blk_queue_root_blkg - return the root blkg of a request_queue
 * @q: request_queue of interest
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}
0281
0282
0283
0284
0285
0286
0287
0288
0289 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
0290 struct blkcg_policy *pol)
0291 {
0292 return blkg ? blkg->pd[pol->plid] : NULL;
0293 }
0294
0295 static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
0296 struct blkcg_policy *pol)
0297 {
0298 return blkcg ? blkcg->cpd[pol->plid] : NULL;
0299 }
0300
0301
0302
0303
0304
0305
0306
0307 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
0308 {
0309 return pd ? pd->blkg : NULL;
0310 }
0311
0312 static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
0313 {
0314 return cpd ? cpd->blkcg : NULL;
0315 }
0316
0317
0318
0319
0320
0321
0322
0323
0324
/*
 * blkg_path - format the cgroup path of @blkg into @buf
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: size of @buf
 *
 * Return value is passed through from cgroup_path().
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
0329
0330
0331
0332
0333
0334
0335
/*
 * blkg_get - take a reference on @blkg
 * @blkg: blkg of interest
 *
 * Caller must already hold a reference (percpu_ref_get requirement).
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}
0340
0341
0342
0343
0344
0345
0346
0347
0348 static inline bool blkg_tryget(struct blkcg_gq *blkg)
0349 {
0350 return blkg && percpu_ref_tryget(&blkg->refcnt);
0351 }
0352
0353
0354
0355
0356
/*
 * blkg_put - drop a reference on @blkg
 * @blkg: blkg of interest
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
/*
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration (cursor of the css walk)
 * @p_blkg: target blkg to walk descendants of
 *
 * Walks the css hierarchy under @p_blkg's cgroup and visits each
 * descendant blkg on the same queue; cgroups without a blkg on that
 * queue are skipped.  Caller presumably must hold the RCU read lock —
 * __blkg_lookup() dereferences RCU-protected pointers.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
/*
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration (cursor of the css walk)
 * @p_blkg: target blkg to walk descendants of
 *
 * Same as blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
0393
0394 bool __blkcg_punt_bio_submit(struct bio *bio);
0395
0396 static inline bool blkcg_punt_bio_submit(struct bio *bio)
0397 {
0398 if (bio->bi_opf & REQ_CGROUP_PUNT)
0399 return __blkcg_punt_bio_submit(bio);
0400 else
0401 return false;
0402 }
0403
/*
 * blkcg_bio_issue_init - stamp @bio with its issue time and size
 * @bio: bio being issued
 */
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}
0408
/*
 * blkcg_use_delay - add a delay "user" to @blkg
 * @blkg: blkg of interest
 *
 * A negative use_delay means the delay was set explicitly (see
 * blkcg_set_delay()) and must not be mixed with counted users — hence
 * the WARN and early return.  The 0->1 transition marks the cgroup as
 * congested by bumping its congestion_count.
 */
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}
0416
/*
 * blkcg_unuse_delay - drop one delay "user" from @blkg
 * @blkg: blkg of interest
 *
 * Returns 1 if a user was dropped, 0 if use_delay was already zero (or
 * negative, which is warned about — negative means explicitly-set delay,
 * see blkcg_use_delay()).  Dropping the last user (1->0) clears the
 * cgroup's congestion_count contribution.
 */
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * Decrement by one with a cmpxchg loop rather than atomic_dec so
	 * that the count never goes below zero if another CPU races us to
	 * zero: atomic_try_cmpxchg reloads @old on failure and the loop
	 * exits once @old reaches 0.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
/*
 * blkcg_set_delay - explicitly set @blkg's delay
 * @blkg: blkg of interest
 * @delay: delay in nanoseconds
 *
 * Marks use_delay with the sentinel -1 (only on a 0->-1 transition, so
 * congestion_count is bumped at most once) and records @delay.  Undone
 * with blkcg_clear_delay(); incompatible with the counted
 * blkcg_use_delay()/blkcg_unuse_delay() scheme.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want a single -1 marker; the cmpxchg fails if already set. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}
0462
0463
0464
0465
0466
0467
0468
/*
 * blkcg_clear_delay - undo blkcg_set_delay()
 * @blkg: blkg of interest
 *
 * Resets use_delay from any non-zero value to 0 and drops one
 * congestion_count reference.  NOTE(review): only a single decrement is
 * done regardless of the old value — presumably only one party may have
 * marked congestion; confirm against the set/use callers.
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* cmpxchg ensures exactly one racing clearer does the decrement */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487 static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
0488 {
0489 return rq->bio->bi_blkg == bio->bi_blkg &&
0490 bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
0491 }
0492
0493 void blk_cgroup_bio_start(struct bio *bio);
0494 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
0495 #else
0496
/* !CONFIG_BLK_CGROUP: empty types and no-op stubs so callers compile away */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
/*
 * NOTE(review): this stub's signature (returns char *, no buf args) differs
 * from the CONFIG_BLK_CGROUP variant above (int blkg_path(blkg, buf, buflen));
 * confirm whether any !CONFIG_BLK_CGROUP caller still uses this form.
 */
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

/* degenerate single-iteration walk over the root request list */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
0535
0536 #endif
0537
0538 #endif