// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

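/*
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 */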
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

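/*
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */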
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

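/*
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */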
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

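/*
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */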
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

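/*
 * blkg_prfill_stat - prfill callback for printing a bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 */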
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

#else

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif

#ifdef CONFIG_BFQ_GROUP_IOSCHED

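/*
 * Helpers to convert between blk-cgroup policy/private data and the
 * BFQ-specific structures embedding them.
 */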
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

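/*
 * bfqg_get()/bfqg_put() manipulate bfqg->ref, the scheduler-side reference
 * counter that keeps the bfq_group allocated until the last reference
 * (including the one taken in bfq_pd_alloc()) is dropped.
 */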
static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

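/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * stats stay meaningful after @bfqg is released, and reset @bfqg's own
 * counters.
 */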
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg)
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;

		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity;

	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->online = true;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update the chain of bfq_groups: we may be handling a leaf group
	 * which, along with some of its ancestors, has not yet been hooked
	 * into the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

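/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in bfq_bic_update_cgroup
 * on guaranteeing the consistency of blkg objects).
 */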
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);

	/*
	 * No point in moving bfqq to the same group, which can happen when
	 * the root group is offlined.
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move: it holds a reference to the
	 * root group until the elevator exits.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;

	/*
	 * Get an extra reference, to prevent bfqq from being freed by a
	 * possible expire or deactivate below.
	 */
	bfqq->ref++;

	/*
	 * Expire bfqq first if it is the queue currently in service, so
	 * that it stops being served by its old group before the move.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	if (entity->parent &&
	    entity->parent->last_bfqq_created == bfqq)
		entity->parent->last_bfqq_created = NULL;
	else if (bfqd->last_bfqq_created == bfqq)
		bfqd->last_bfqq_created = NULL;

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;

	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);

	bfq_put_queue(bfqq);
}

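/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to bfqg, assuming that bfqd->lock is held; this makes sure
 * that the reference to the group stays valid across the move.
 */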
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
				     struct bfq_io_cq *bic,
				     struct bfq_group *bfqg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_entity *entity;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
			/* We are the only user of this bfqq, just move it */
			if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
				bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		} else {
			struct bfq_queue *bfqq;

			/*
			 * The queue was merged to a different queue. Check
			 * whether the whole merge chain still belongs to the
			 * new cgroup.
			 */
			for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
				if (bfqq->entity.sched_data !=
				    &bfqg->sched_data)
					break;
			if (bfqq) {
				/*
				 * Some queue changed cgroup so the merge is
				 * not valid anymore. We cannot easily just
				 * cancel the merge (by clearing new_bfqq) as
				 * there may be other processes using this
				 * queue and holding refs to all queues below
				 * sync_bfqq->new_bfqq. Similarly if the merge
				 * already happened, we need to detach from
				 * bfqq now so that we cannot merge bio to a
				 * request from the old cgroup.
				 */
				bfq_put_cooperator(sync_bfqq);
				bfq_release_process_ref(bfqd, sync_bfqq);
				bic_set_bfqq(bic, NULL, 1);
			}
		}
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);

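	/*
	 * Cache the blkg path of the new group and record the blkcg serial
	 * number, so that the (unlikely) cgroup change handled above does
	 * not have to be repeated for further bios from the same blkcg.
	 */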
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}

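/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */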
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

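/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */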
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) {
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
						    ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

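/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */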
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

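/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */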
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity)
		goto put_async_queues;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);
	bfqg->online = false;

	spin_unlock_irqrestore(&bfqd->lock, flags);

	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;

	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;

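		/*
		 * Publish the new weight before setting prio_changed, so
		 * that a reader seeing prio_changed also sees new_weight.
		 */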
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* a bare 0 is rejected; "default" clears the device weight */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif
	{ }
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{}
};

#else

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif