// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time
 * - If the minimum latency in the above window exceeds a given target, increase
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapshot the sample space, and
 *   reset the window. A negative scaling step means we're probing the write
 *   performance ceiling, meaning we keep going until we can't go any higher
 *   anymore.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, will be scaled up or down
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows without enough
	 * information to scale up or down, drift the scale step back
	 * toward center (step == 0).
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
		      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}
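
/*
 * Return the wait queue this IO is accounted against: kswapd and discard
 * IO each get a queue of their own, everything else shares the default one.
 */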
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}
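
/*
 * Drop an inflight count on @rqw and, if we are back under the limit
 * that applies to this type of IO, wake up waiters.
 */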
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}
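
/*
 * How long has the currently tracked sync read been in flight?
 * Returns 0 if we aren't tracking one.
 */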
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue is larger than the window size, or it
	 * exceeds our min target AND we haven't logged any entries, flag the
	 * latency as exceeded. wbt works off completion latencies, but a
	 * sync read that is stuck in the device would otherwise never show
	 * up in the stats, so also look at the issue side.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

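/*
 * Derive the allowed queueing depths from the current max depth: normal
 * writeback may use roughly half of it, background writeback roughly a
 * quarter. E.g. max_depth == 16 gives wb_normal == 8 and wb_background == 4.
 * A max depth of 2 or less pins background to 1, and a zero latency target
 * disables throttling entirely (both limits 0).
 */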
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

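/*
 * (Re)arm the latency monitoring window. While we are throttling
 * (scale_step > 0), shrink the window so we react faster; otherwise
 * run with the full window.
 */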
static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	if (!rwb->rqos.q->disk)
		return;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, we don't have a valid
		 * read/write sample, but we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	wbt_update_limits(RQWB(rqos));
}

static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

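/*
 * Pick the inflight limit that applies to this IO: unthrottled when wbt
 * is disabled, the background limit for discards, and otherwise scaled
 * by how urgent the write is.
 */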
static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	blk_opf_t opf;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;

	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       blk_opf_t opf)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.opf = opf,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

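/*
 * Decide whether this bio is subject to throttling: only buffered writes
 * and discards are. Writes marked both REQ_SYNC and REQ_IDLE (e.g.
 * O_DIRECT style writes) and reads are left alone.
 */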
static inline bool wbt_should_throttle(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

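/*
 * Undo the inflight accounting for a bio that was throttled but is being
 * ended without ever becoming a tracked request.
 */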
static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);

	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

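/*
 * Copy the wbt accounting flags from the bio to the request, so that
 * completion of the request can undo the inflight accounting.
 */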
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);

	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows us
	 * to react quicker, if a sync IO takes a long time to complete. Note
	 * that this is just a hint. The request can go away when it completes,
	 * so it's important we never dereference it. We only use the address to
	 * compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos) {
		if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
		return;
	}

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const enum req_op op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

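/*
 * Called when the device queue depth changes: pick up the new depth and
 * recompute the throttling limits from scratch.
 */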
static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
	wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;

	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->enable_state = WBT_STATE_OFF_DEFAULT;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
	return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%d\n", rwb->enable_state);
	return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;

	seq_printf(m, "%u\n", rqos->id);
	return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		seq_printf(m, "%d: inflight %d\n", i,
			   atomic_read(&rwb->rq_wait[i].inflight));
	return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->min_lat_nsec);
	return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->unknown_cnt);
	return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_normal);
	return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_background);
	return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
	{"enabled", 0400, wbt_enabled_show},
	{"id", 0400, wbt_id_show},
	{"inflight", 0400, wbt_inflight_show},
	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
	{"wb_normal", 0400, wbt_normal_show},
	{"wb_background", 0400, wbt_background_show},
	{},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
	.debugfs_attrs = wbt_debugfs_attrs,
#endif
};

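/*
 * Allocate the rq_wb state for a queue, register it as an rq_qos policy,
 * and start out enabled at the default latency target for the device type.
 */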
int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;
	int ret;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;

	/*
	 * Assign rwb and add the stats callback.
	 */
	ret = rq_qos_add(q, &rwb->rqos);
	if (ret)
		goto err_free;

	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_queue_depth_changed(&rwb->rqos);
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;

err_free:
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
	return ret;
}