0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
0004  *  for the blk-mq scheduling framework
0005  *
0006  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
0007  */
0008 #include <linux/kernel.h>
0009 #include <linux/fs.h>
0010 #include <linux/blkdev.h>
0011 #include <linux/blk-mq.h>
0012 #include <linux/bio.h>
0013 #include <linux/module.h>
0014 #include <linux/slab.h>
0015 #include <linux/init.h>
0016 #include <linux/compiler.h>
0017 #include <linux/rbtree.h>
0018 #include <linux/sbitmap.h>
0019 
0020 #include <trace/events/block.h>
0021 
0022 #include "elevator.h"
0023 #include "blk.h"
0024 #include "blk-mq.h"
0025 #include "blk-mq-debugfs.h"
0026 #include "blk-mq-tag.h"
0027 #include "blk-mq-sched.h"
0028 
0029 /*
0030  * See Documentation/block/deadline-iosched.rst
0031  */
0032 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
0033 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
0034 /*
0035  * Time after which to dispatch lower priority requests even if higher
0036  * priority requests are pending.
0037  */
0038 static const int prio_aging_expire = 10 * HZ;
0039 static const int writes_starved = 2;    /* max times reads can starve a write */
0040 static const int fifo_batch = 16;       /* # of sequential requests treated as one
0041                      by the above parameters. For throughput. */
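
/*
 * Worked example (assuming HZ == 1000): a read inserted at jiffies == 10000
 * is assigned fifo_time = 10000 + read_expire = 10500 by dd_insert_request()
 * and is treated as expired by deadline_check_fifo() once jiffies >= 10500,
 * i.e. after 500 ms. A write inserted at the same time would only expire
 * after 5 seconds.
 */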
0042 
0043 enum dd_data_dir {
0044     DD_READ     = READ,
0045     DD_WRITE    = WRITE,
0046 };
0047 
0048 enum { DD_DIR_COUNT = 2 };
0049 
0050 enum dd_prio {
0051     DD_RT_PRIO  = 0,
0052     DD_BE_PRIO  = 1,
0053     DD_IDLE_PRIO    = 2,
0054     DD_PRIO_MAX = 2,
0055 };
0056 
0057 enum { DD_PRIO_COUNT = 3 };
0058 
0059 /*
0060  * I/O statistics per I/O priority. It is fine if these counters overflow.
0061  * What matters is that these counters are at least as wide as
0062  * log2(max_outstanding_requests).
0063  */
0064 struct io_stats_per_prio {
0065     uint32_t inserted;
0066     uint32_t merged;
0067     uint32_t dispatched;
0068     atomic_t completed;
0069 };
0070 
0071 /*
0072  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
0073  * present on both sort_list[] and fifo_list[].
0074  */
0075 struct dd_per_prio {
0076     struct list_head dispatch;
0077     struct rb_root sort_list[DD_DIR_COUNT];
0078     struct list_head fifo_list[DD_DIR_COUNT];
0079     /* Next request in sector-sorted order. Read, write or both are NULL. */
0080     struct request *next_rq[DD_DIR_COUNT];
0081     struct io_stats_per_prio stats;
0082 };
0083 
0084 struct deadline_data {
0085     /*
0086      * run time data
0087      */
0088 
0089     struct dd_per_prio per_prio[DD_PRIO_COUNT];
0090 
0091     /* Data direction of latest dispatched request. */
0092     enum dd_data_dir last_dir;
0093     unsigned int batching;      /* number of sequential requests made */
0094     unsigned int starved;       /* times reads have starved writes */
0095 
0096     /*
0097      * settings that change how the i/o scheduler behaves
0098      */
0099     int fifo_expire[DD_DIR_COUNT];
0100     int fifo_batch;
0101     int writes_starved;
0102     int front_merges;
0103     u32 async_depth;
0104     int prio_aging_expire;
0105 
0106     spinlock_t lock;
0107     spinlock_t zone_lock;
0108 };
0109 
0110 /* Maps an I/O priority class to a deadline scheduler priority. */
0111 static const enum dd_prio ioprio_class_to_prio[] = {
0112     [IOPRIO_CLASS_NONE] = DD_BE_PRIO,
0113     [IOPRIO_CLASS_RT]   = DD_RT_PRIO,
0114     [IOPRIO_CLASS_BE]   = DD_BE_PRIO,
0115     [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
0116 };
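
/*
 * For example, a request carrying IOPRIO_CLASS_RT lands on
 * dd->per_prio[DD_RT_PRIO], which dd_dispatch_request() serves before the
 * best-effort and idle levels (subject to the prio_aging_expire
 * anti-starvation path). Requests without an explicit priority class
 * (IOPRIO_CLASS_NONE) are treated as best-effort.
 */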
0117 
0118 static inline struct rb_root *
0119 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
0120 {
0121     return &per_prio->sort_list[rq_data_dir(rq)];
0122 }
0123 
0124 /*
0125  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
0126  * request.
0127  */
0128 static u8 dd_rq_ioclass(struct request *rq)
0129 {
0130     return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
0131 }
0132 
0133 /*
0134  * get the request after `rq' in sector-sorted order
0135  */
0136 static inline struct request *
0137 deadline_latter_request(struct request *rq)
0138 {
0139     struct rb_node *node = rb_next(&rq->rb_node);
0140 
0141     if (node)
0142         return rb_entry_rq(node);
0143 
0144     return NULL;
0145 }
0146 
0147 static void
0148 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
0149 {
0150     struct rb_root *root = deadline_rb_root(per_prio, rq);
0151 
0152     elv_rb_add(root, rq);
0153 }
0154 
0155 static inline void
0156 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
0157 {
0158     const enum dd_data_dir data_dir = rq_data_dir(rq);
0159 
0160     if (per_prio->next_rq[data_dir] == rq)
0161         per_prio->next_rq[data_dir] = deadline_latter_request(rq);
0162 
0163     elv_rb_del(deadline_rb_root(per_prio, rq), rq);
0164 }
0165 
0166 /*
0167  * remove rq from rbtree and fifo.
0168  */
0169 static void deadline_remove_request(struct request_queue *q,
0170                     struct dd_per_prio *per_prio,
0171                     struct request *rq)
0172 {
0173     list_del_init(&rq->queuelist);
0174 
0175     /*
0176      * We might not be on the rbtree, if we are doing an insert merge
0177      */
0178     if (!RB_EMPTY_NODE(&rq->rb_node))
0179         deadline_del_rq_rb(per_prio, rq);
0180 
0181     elv_rqhash_del(q, rq);
0182     if (q->last_merge == rq)
0183         q->last_merge = NULL;
0184 }
0185 
0186 static void dd_request_merged(struct request_queue *q, struct request *req,
0187                   enum elv_merge type)
0188 {
0189     struct deadline_data *dd = q->elevator->elevator_data;
0190     const u8 ioprio_class = dd_rq_ioclass(req);
0191     const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
0192     struct dd_per_prio *per_prio = &dd->per_prio[prio];
0193 
0194     /*
0195      * if the merge was a front merge, we need to reposition request
0196      */
0197     if (type == ELEVATOR_FRONT_MERGE) {
0198         elv_rb_del(deadline_rb_root(per_prio, req), req);
0199         deadline_add_rq_rb(per_prio, req);
0200     }
0201 }
0202 
0203 /*
0204  * Callback function that is invoked after @next has been merged into @req.
0205  */
0206 static void dd_merged_requests(struct request_queue *q, struct request *req,
0207                    struct request *next)
0208 {
0209     struct deadline_data *dd = q->elevator->elevator_data;
0210     const u8 ioprio_class = dd_rq_ioclass(next);
0211     const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
0212 
0213     lockdep_assert_held(&dd->lock);
0214 
0215     dd->per_prio[prio].stats.merged++;
0216 
0217     /*
0218      * if next expires before req, assign its expire time to req
0219      * and move into next position (next will be deleted) in fifo
0220      */
0221     if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
0222         if (time_before((unsigned long)next->fifo_time,
0223                 (unsigned long)req->fifo_time)) {
0224             list_move(&req->queuelist, &next->queuelist);
0225             req->fifo_time = next->fifo_time;
0226         }
0227     }
0228 
0229     /*
0230      * kill knowledge of next, this one is a goner
0231      */
0232     deadline_remove_request(q, &dd->per_prio[prio], next);
0233 }
0234 
0235 /*
0236  * move an entry to dispatch queue
0237  */
0238 static void
0239 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
0240               struct request *rq)
0241 {
0242     const enum dd_data_dir data_dir = rq_data_dir(rq);
0243 
0244     per_prio->next_rq[data_dir] = deadline_latter_request(rq);
0245 
0246     /*
0247      * take it off the sort and fifo list
0248      */
0249     deadline_remove_request(rq->q, per_prio, rq);
0250 }
0251 
0252 /* Number of requests queued for a given priority level. */
0253 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
0254 {
0255     const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
0256 
0257     lockdep_assert_held(&dd->lock);
0258 
0259     return stats->inserted - atomic_read(&stats->completed);
0260 }
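
/*
 * Note: this counts every request that has been inserted but not yet
 * completed, so it also includes requests currently owned by the block
 * driver (see dd_owned_by_driver()).
 */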
0261 
0262 /*
0263  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
0264  * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
0265  */
0266 static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
0267                       enum dd_data_dir data_dir)
0268 {
0269     struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
0270 
0271     /*
0272      * rq is expired!
0273      */
0274     if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
0275         return 1;
0276 
0277     return 0;
0278 }
0279 
0280 /*
0281  * For the specified data direction, return the next request to
0282  * dispatch using arrival ordered lists.
0283  */
0284 static struct request *
0285 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
0286               enum dd_data_dir data_dir)
0287 {
0288     struct request *rq;
0289     unsigned long flags;
0290 
0291     if (list_empty(&per_prio->fifo_list[data_dir]))
0292         return NULL;
0293 
0294     rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
0295     if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
0296         return rq;
0297 
0298     /*
0299      * Look for a write request that can be dispatched, that is one with
0300      * an unlocked target zone.
0301      */
0302     spin_lock_irqsave(&dd->zone_lock, flags);
0303     list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
0304         if (blk_req_can_dispatch_to_zone(rq))
0305             goto out;
0306     }
0307     rq = NULL;
0308 out:
0309     spin_unlock_irqrestore(&dd->zone_lock, flags);
0310 
0311     return rq;
0312 }
0313 
0314 /*
0315  * For the specified data direction, return the next request to
0316  * dispatch using sector position sorted lists.
0317  */
0318 static struct request *
0319 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
0320               enum dd_data_dir data_dir)
0321 {
0322     struct request *rq;
0323     unsigned long flags;
0324 
0325     rq = per_prio->next_rq[data_dir];
0326     if (!rq)
0327         return NULL;
0328 
0329     if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
0330         return rq;
0331 
0332     /*
0333      * Look for a write request that can be dispatched, that is one with
0334      * an unlocked target zone.
0335      */
0336     spin_lock_irqsave(&dd->zone_lock, flags);
0337     while (rq) {
0338         if (blk_req_can_dispatch_to_zone(rq))
0339             break;
0340         rq = deadline_latter_request(rq);
0341     }
0342     spin_unlock_irqrestore(&dd->zone_lock, flags);
0343 
0344     return rq;
0345 }
0346 
0347 /*
0348  * Returns true if and only if @rq started after @latest_start where
0349  * @latest_start is in jiffies.
0350  */
0351 static bool started_after(struct deadline_data *dd, struct request *rq,
0352               unsigned long latest_start)
0353 {
0354     unsigned long start_time = (unsigned long)rq->fifo_time;
0355 
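    /*
     * fifo_time normally holds the insertion time plus fifo_expire (see
     * dd_insert_request()), so subtracting fifo_expire recovers an estimate
     * of when the request was inserted.
     */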
0356     start_time -= dd->fifo_expire[rq_data_dir(rq)];
0357 
0358     return time_after(start_time, latest_start);
0359 }
0360 
0361 /*
0362  * deadline_dispatch_requests selects the best request according to
0363  * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
0364  */
0365 static struct request *__dd_dispatch_request(struct deadline_data *dd,
0366                          struct dd_per_prio *per_prio,
0367                          unsigned long latest_start)
0368 {
0369     struct request *rq, *next_rq;
0370     enum dd_data_dir data_dir;
0371     enum dd_prio prio;
0372     u8 ioprio_class;
0373 
0374     lockdep_assert_held(&dd->lock);
0375 
0376     if (!list_empty(&per_prio->dispatch)) {
0377         rq = list_first_entry(&per_prio->dispatch, struct request,
0378                       queuelist);
0379         if (started_after(dd, rq, latest_start))
0380             return NULL;
0381         list_del_init(&rq->queuelist);
0382         goto done;
0383     }
0384 
0385     /*
0386      * batches are currently reads XOR writes
0387      */
0388     rq = deadline_next_request(dd, per_prio, dd->last_dir);
0389     if (rq && dd->batching < dd->fifo_batch)
0390         /* we have a next request and are still entitled to batch */
0391         goto dispatch_request;
0392 
0393     /*
0394      * at this point we are not running a batch. select the appropriate
0395      * data direction (read / write)
0396      */
0397 
0398     if (!list_empty(&per_prio->fifo_list[DD_READ])) {
0399         BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
0400 
0401         if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
0402             (dd->starved++ >= dd->writes_starved))
0403             goto dispatch_writes;
0404 
0405         data_dir = DD_READ;
0406 
0407         goto dispatch_find_request;
0408     }
0409 
0410     /*
0411      * there are either no reads, or writes have been starved
0412      */
0413 
0414     if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
0415 dispatch_writes:
0416         BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
0417 
0418         dd->starved = 0;
0419 
0420         data_dir = DD_WRITE;
0421 
0422         goto dispatch_find_request;
0423     }
0424 
0425     return NULL;
0426 
0427 dispatch_find_request:
0428     /*
0429      * we are not running a batch, find best request for selected data_dir
0430      */
0431     next_rq = deadline_next_request(dd, per_prio, data_dir);
0432     if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
0433         /*
0434          * A deadline has expired, the last request was in the other
0435          * direction, or we have run out of higher-sectored requests.
0436          * Start again from the request with the earliest expiry time.
0437          */
0438         rq = deadline_fifo_request(dd, per_prio, data_dir);
0439     } else {
0440         /*
0441          * The last req was the same dir and we have a next request in
0442          * sort order. No expired requests so continue on from here.
0443          */
0444         rq = next_rq;
0445     }
0446 
0447     /*
0448      * For a zoned block device, if we only have writes queued and none of
0449      * them can be dispatched, rq will be NULL.
0450      */
0451     if (!rq)
0452         return NULL;
0453 
0454     dd->last_dir = data_dir;
0455     dd->batching = 0;
0456 
0457 dispatch_request:
0458     if (started_after(dd, rq, latest_start))
0459         return NULL;
0460 
0461     /*
0462      * rq is the selected request.
0463      */
0464     dd->batching++;
0465     deadline_move_request(dd, per_prio, rq);
0466 done:
0467     ioprio_class = dd_rq_ioclass(rq);
0468     prio = ioprio_class_to_prio[ioprio_class];
0469     dd->per_prio[prio].stats.dispatched++;
0470     /*
0471      * If the request needs its target zone locked, do it.
0472      */
0473     blk_req_zone_write_lock(rq);
0474     rq->rq_flags |= RQF_STARTED;
0475     return rq;
0476 }
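
/*
 * In short: while a batch is running (dd->batching < dd->fifo_batch) and a
 * next request exists in the direction of the last dispatch, that request is
 * dispatched in sector order. Otherwise a data direction is selected (reads
 * are preferred unless a write is pending and has already been starved
 * writes_starved times) and a new batch starts, either from the request with
 * the earliest deadline (when a deadline has expired or no next request is
 * cached) or from the next request in sector order.
 */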
0477 
0478 /*
0479  * Check whether there are any requests with priority other than DD_RT_PRIO
0480  * that were inserted more than prio_aging_expire jiffies ago.
0481  */
0482 static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
0483                               unsigned long now)
0484 {
0485     struct request *rq;
0486     enum dd_prio prio;
0487     int prio_cnt;
0488 
0489     lockdep_assert_held(&dd->lock);
0490 
0491     prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
0492            !!dd_queued(dd, DD_IDLE_PRIO);
0493     if (prio_cnt < 2)
0494         return NULL;
0495 
0496     for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
0497         rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
0498                        now - dd->prio_aging_expire);
0499         if (rq)
0500             return rq;
0501     }
0502 
0503     return NULL;
0504 }
0505 
0506 /*
0507  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
0508  *
0509  * One confusing aspect here is that we get called for a specific
0510  * hardware queue, but we may return a request that is for a
0511  * different hardware queue. This is because mq-deadline has shared
0512  * state for all hardware queues, in terms of sorting, FIFOs, etc.
0513  */
0514 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
0515 {
0516     struct deadline_data *dd = hctx->queue->elevator->elevator_data;
0517     const unsigned long now = jiffies;
0518     struct request *rq;
0519     enum dd_prio prio;
0520 
0521     spin_lock(&dd->lock);
0522     rq = dd_dispatch_prio_aged_requests(dd, now);
0523     if (rq)
0524         goto unlock;
0525 
0526     /*
0527      * Next, dispatch requests in priority order. Ignore lower priority
0528      * requests if any higher priority requests are pending.
0529      */
0530     for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
0531         rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
0532         if (rq || dd_queued(dd, prio))
0533             break;
0534     }
0535 
0536 unlock:
0537     spin_unlock(&dd->lock);
0538 
0539     return rq;
0540 }
0541 
0542 /*
0543  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
0544  * function is used by __blk_mq_get_tag().
0545  */
0546 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
0547 {
0548     struct deadline_data *dd = data->q->elevator->elevator_data;
0549 
0550     /* Do not throttle synchronous reads. */
0551     if (op_is_sync(opf) && !op_is_write(opf))
0552         return;
0553 
0554     /*
0555      * Throttle asynchronous requests and writes such that these requests
0556      * do not block the allocation of synchronous requests.
0557      */
0558     data->shallow_depth = dd->async_depth;
0559 }
0560 
0561 /* Called by blk_mq_update_nr_requests(). */
0562 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
0563 {
0564     struct request_queue *q = hctx->queue;
0565     struct deadline_data *dd = q->elevator->elevator_data;
0566     struct blk_mq_tags *tags = hctx->sched_tags;
0567 
0568     dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
0569 
0570     sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
0571 }
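
/*
 * Example: with q->nr_requests == 256, async_depth becomes 192, so the
 * allocation of scheduler tags for asynchronous requests and writes is
 * limited to a depth of 192 (see dd_limit_depth()), leaving the remainder
 * available for synchronous reads.
 */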
0572 
0573 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
0574 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
0575 {
0576     dd_depth_updated(hctx);
0577     return 0;
0578 }
0579 
0580 static void dd_exit_sched(struct elevator_queue *e)
0581 {
0582     struct deadline_data *dd = e->elevator_data;
0583     enum dd_prio prio;
0584 
0585     for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
0586         struct dd_per_prio *per_prio = &dd->per_prio[prio];
0587         const struct io_stats_per_prio *stats = &per_prio->stats;
0588         uint32_t queued;
0589 
0590         WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
0591         WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
0592 
0593         spin_lock(&dd->lock);
0594         queued = dd_queued(dd, prio);
0595         spin_unlock(&dd->lock);
0596 
0597         WARN_ONCE(queued != 0,
0598               "statistics for priority %d: i %u m %u d %u c %u\n",
0599               prio, stats->inserted, stats->merged,
0600               stats->dispatched, atomic_read(&stats->completed));
0601     }
0602 
0603     kfree(dd);
0604 }
0605 
0606 /*
0607  * initialize elevator private data (deadline_data).
0608  */
0609 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
0610 {
0611     struct deadline_data *dd;
0612     struct elevator_queue *eq;
0613     enum dd_prio prio;
0614     int ret = -ENOMEM;
0615 
0616     eq = elevator_alloc(q, e);
0617     if (!eq)
0618         return ret;
0619 
0620     dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
0621     if (!dd)
0622         goto put_eq;
0623 
0624     eq->elevator_data = dd;
0625 
0626     for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
0627         struct dd_per_prio *per_prio = &dd->per_prio[prio];
0628 
0629         INIT_LIST_HEAD(&per_prio->dispatch);
0630         INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
0631         INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
0632         per_prio->sort_list[DD_READ] = RB_ROOT;
0633         per_prio->sort_list[DD_WRITE] = RB_ROOT;
0634     }
0635     dd->fifo_expire[DD_READ] = read_expire;
0636     dd->fifo_expire[DD_WRITE] = write_expire;
0637     dd->writes_starved = writes_starved;
0638     dd->front_merges = 1;
0639     dd->last_dir = DD_WRITE;
0640     dd->fifo_batch = fifo_batch;
0641     dd->prio_aging_expire = prio_aging_expire;
0642     spin_lock_init(&dd->lock);
0643     spin_lock_init(&dd->zone_lock);
0644 
0645     /* We dispatch request-queue wide instead of per hw queue */
0646     blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
0647 
0648     q->elevator = eq;
0649     return 0;
0650 
0651 put_eq:
0652     kobject_put(&eq->kobj);
0653     return ret;
0654 }
0655 
0656 /*
0657  * Try to merge @bio into an existing request. If @bio has been merged into
0658  * an existing request, store the pointer to that request into *@rq.
0659  */
0660 static int dd_request_merge(struct request_queue *q, struct request **rq,
0661                 struct bio *bio)
0662 {
0663     struct deadline_data *dd = q->elevator->elevator_data;
0664     const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
0665     const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
0666     struct dd_per_prio *per_prio = &dd->per_prio[prio];
0667     sector_t sector = bio_end_sector(bio);
0668     struct request *__rq;
0669 
0670     if (!dd->front_merges)
0671         return ELEVATOR_NO_MERGE;
0672 
0673     __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
0674     if (__rq) {
0675         BUG_ON(sector != blk_rq_pos(__rq));
0676 
0677         if (elv_bio_merge_ok(__rq, bio)) {
0678             *rq = __rq;
0679             if (blk_discard_mergable(__rq))
0680                 return ELEVATOR_DISCARD_MERGE;
0681             return ELEVATOR_FRONT_MERGE;
0682         }
0683     }
0684 
0685     return ELEVATOR_NO_MERGE;
0686 }
0687 
0688 /*
0689  * Attempt to merge a bio into an existing request. This function is called
0690  * before @bio is associated with a request.
0691  */
0692 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
0693         unsigned int nr_segs)
0694 {
0695     struct deadline_data *dd = q->elevator->elevator_data;
0696     struct request *free = NULL;
0697     bool ret;
0698 
0699     spin_lock(&dd->lock);
0700     ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
0701     spin_unlock(&dd->lock);
0702 
0703     if (free)
0704         blk_mq_free_request(free);
0705 
0706     return ret;
0707 }
0708 
0709 /*
0710  * add rq to rbtree and fifo
0711  */
0712 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
0713                   bool at_head)
0714 {
0715     struct request_queue *q = hctx->queue;
0716     struct deadline_data *dd = q->elevator->elevator_data;
0717     const enum dd_data_dir data_dir = rq_data_dir(rq);
0718     u16 ioprio = req_get_ioprio(rq);
0719     u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
0720     struct dd_per_prio *per_prio;
0721     enum dd_prio prio;
0722     LIST_HEAD(free);
0723 
0724     lockdep_assert_held(&dd->lock);
0725 
0726     /*
0727      * This may be a requeue of a write request that has locked its
0728      * target zone. If it is the case, this releases the zone lock.
0729      */
0730     blk_req_zone_write_unlock(rq);
0731 
0732     prio = ioprio_class_to_prio[ioprio_class];
0733     per_prio = &dd->per_prio[prio];
0734     if (!rq->elv.priv[0]) {
0735         per_prio->stats.inserted++;
0736         rq->elv.priv[0] = (void *)(uintptr_t)1;
0737     }
0738 
0739     if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
0740         blk_mq_free_requests(&free);
0741         return;
0742     }
0743 
0744     trace_block_rq_insert(rq);
0745 
0746     if (at_head) {
0747         list_add(&rq->queuelist, &per_prio->dispatch);
0748         rq->fifo_time = jiffies;
0749     } else {
0750         deadline_add_rq_rb(per_prio, rq);
0751 
0752         if (rq_mergeable(rq)) {
0753             elv_rqhash_add(q, rq);
0754             if (!q->last_merge)
0755                 q->last_merge = rq;
0756         }
0757 
0758         /*
0759          * set expire time and add to fifo list
0760          */
0761         rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
0762         list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
0763     }
0764 }
0765 
0766 /*
0767  * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
0768  */
0769 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
0770                    struct list_head *list, bool at_head)
0771 {
0772     struct request_queue *q = hctx->queue;
0773     struct deadline_data *dd = q->elevator->elevator_data;
0774 
0775     spin_lock(&dd->lock);
0776     while (!list_empty(list)) {
0777         struct request *rq;
0778 
0779         rq = list_first_entry(list, struct request, queuelist);
0780         list_del_init(&rq->queuelist);
0781         dd_insert_request(hctx, rq, at_head);
0782     }
0783     spin_unlock(&dd->lock);
0784 }
0785 
0786 /* Callback from inside blk_mq_rq_ctx_init(). */
0787 static void dd_prepare_request(struct request *rq)
0788 {
0789     rq->elv.priv[0] = NULL;
0790 }
0791 
0792 /*
0793  * Callback from inside blk_mq_free_request().
0794  *
0795  * For zoned block devices, write unlock the target zone of
0796  * completed write requests. Do this while holding the zone lock
0797  * spinlock so that the zone is never unlocked while deadline_fifo_request()
0798  * or deadline_next_request() are executing. This function is called for
0799  * all requests, whether or not these requests complete successfully.
0800  *
0801  * For a zoned block device, __dd_dispatch_request() may have stopped
0802  * dispatching requests if all the queued requests are write requests directed
0803  * at zones that are already locked due to on-going write requests. To ensure
0804  * write request dispatch progress in this case, mark the queue as needing a
0805  * restart to ensure that the queue is run again after completion of the
0806  * request and zones being unlocked.
0807  */
0808 static void dd_finish_request(struct request *rq)
0809 {
0810     struct request_queue *q = rq->q;
0811     struct deadline_data *dd = q->elevator->elevator_data;
0812     const u8 ioprio_class = dd_rq_ioclass(rq);
0813     const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
0814     struct dd_per_prio *per_prio = &dd->per_prio[prio];
0815 
0816     /*
0817      * The block layer core may call dd_finish_request() without having
0818      * called dd_insert_requests(). Skip requests that bypassed I/O
0819      * scheduling. See also blk_mq_request_bypass_insert().
0820      */
0821     if (!rq->elv.priv[0])
0822         return;
0823 
0824     atomic_inc(&per_prio->stats.completed);
0825 
0826     if (blk_queue_is_zoned(q)) {
0827         unsigned long flags;
0828 
0829         spin_lock_irqsave(&dd->zone_lock, flags);
0830         blk_req_zone_write_unlock(rq);
0831         if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
0832             blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
0833         spin_unlock_irqrestore(&dd->zone_lock, flags);
0834     }
0835 }
0836 
0837 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
0838 {
0839     return !list_empty_careful(&per_prio->dispatch) ||
0840         !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
0841         !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
0842 }
0843 
0844 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
0845 {
0846     struct deadline_data *dd = hctx->queue->elevator->elevator_data;
0847     enum dd_prio prio;
0848 
0849     for (prio = 0; prio <= DD_PRIO_MAX; prio++)
0850         if (dd_has_work_for_prio(&dd->per_prio[prio]))
0851             return true;
0852 
0853     return false;
0854 }
0855 
0856 /*
0857  * sysfs parts below
0858  */
0859 #define SHOW_INT(__FUNC, __VAR)                     \
0860 static ssize_t __FUNC(struct elevator_queue *e, char *page)     \
0861 {                                   \
0862     struct deadline_data *dd = e->elevator_data;            \
0863                                     \
0864     return sysfs_emit(page, "%d\n", __VAR);             \
0865 }
0866 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
0867 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
0868 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
0869 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
0870 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
0871 SHOW_INT(deadline_front_merges_show, dd->front_merges);
0872 SHOW_INT(deadline_async_depth_show, dd->async_depth);
0873 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
0874 #undef SHOW_INT
0875 #undef SHOW_JIFFIES
0876 
0877 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)         \
0878 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
0879 {                                   \
0880     struct deadline_data *dd = e->elevator_data;            \
0881     int __data, __ret;                      \
0882                                     \
0883     __ret = kstrtoint(page, 0, &__data);                \
0884     if (__ret < 0)                          \
0885         return __ret;                       \
0886     if (__data < (MIN))                     \
0887         __data = (MIN);                     \
0888     else if (__data > (MAX))                    \
0889         __data = (MAX);                     \
0890     *(__PTR) = __CONV(__data);                  \
0891     return count;                           \
0892 }
0893 #define STORE_INT(__FUNC, __PTR, MIN, MAX)              \
0894     STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
0895 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)              \
0896     STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
0897 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
0898 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
0899 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
0900 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
0901 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
0902 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
0903 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
0904 #undef STORE_FUNCTION
0905 #undef STORE_INT
0906 #undef STORE_JIFFIES
0907 
0908 #define DD_ATTR(name) \
0909     __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
0910 
0911 static struct elv_fs_entry deadline_attrs[] = {
0912     DD_ATTR(read_expire),
0913     DD_ATTR(write_expire),
0914     DD_ATTR(writes_starved),
0915     DD_ATTR(front_merges),
0916     DD_ATTR(async_depth),
0917     DD_ATTR(fifo_batch),
0918     DD_ATTR(prio_aging_expire),
0919     __ATTR_NULL
0920 };
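
/*
 * When mq-deadline is the active scheduler these attributes appear under
 * /sys/block/<dev>/queue/iosched/. The *_expire values are reported and set
 * in milliseconds. Illustrative usage (the device name is only an example):
 *
 *   echo mq-deadline > /sys/block/sda/queue/scheduler
 *   echo 250 > /sys/block/sda/queue/iosched/read_expire
 */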
0921 
0922 #ifdef CONFIG_BLK_DEBUG_FS
0923 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)       \
0924 static void *deadline_##name##_fifo_start(struct seq_file *m,       \
0925                       loff_t *pos)          \
0926     __acquires(&dd->lock)                       \
0927 {                                   \
0928     struct request_queue *q = m->private;               \
0929     struct deadline_data *dd = q->elevator->elevator_data;      \
0930     struct dd_per_prio *per_prio = &dd->per_prio[prio];     \
0931                                     \
0932     spin_lock(&dd->lock);                       \
0933     return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
0934 }                                   \
0935                                     \
0936 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
0937                      loff_t *pos)           \
0938 {                                   \
0939     struct request_queue *q = m->private;               \
0940     struct deadline_data *dd = q->elevator->elevator_data;      \
0941     struct dd_per_prio *per_prio = &dd->per_prio[prio];     \
0942                                     \
0943     return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
0944 }                                   \
0945                                     \
0946 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
0947     __releases(&dd->lock)                       \
0948 {                                   \
0949     struct request_queue *q = m->private;               \
0950     struct deadline_data *dd = q->elevator->elevator_data;      \
0951                                     \
0952     spin_unlock(&dd->lock);                     \
0953 }                                   \
0954                                     \
0955 static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
0956     .start  = deadline_##name##_fifo_start,             \
0957     .next   = deadline_##name##_fifo_next,              \
0958     .stop   = deadline_##name##_fifo_stop,              \
0959     .show   = blk_mq_debugfs_rq_show,               \
0960 };                                  \
0961                                     \
0962 static int deadline_##name##_next_rq_show(void *data,           \
0963                       struct seq_file *m)       \
0964 {                                   \
0965     struct request_queue *q = data;                 \
0966     struct deadline_data *dd = q->elevator->elevator_data;      \
0967     struct dd_per_prio *per_prio = &dd->per_prio[prio];     \
0968     struct request *rq = per_prio->next_rq[data_dir];       \
0969                                     \
0970     if (rq)                             \
0971         __blk_mq_debugfs_rq_show(m, rq);            \
0972     return 0;                           \
0973 }
0974 
0975 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
0976 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
0977 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
0978 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
0979 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
0980 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
0981 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
0982 
0983 static int deadline_batching_show(void *data, struct seq_file *m)
0984 {
0985     struct request_queue *q = data;
0986     struct deadline_data *dd = q->elevator->elevator_data;
0987 
0988     seq_printf(m, "%u\n", dd->batching);
0989     return 0;
0990 }
0991 
0992 static int deadline_starved_show(void *data, struct seq_file *m)
0993 {
0994     struct request_queue *q = data;
0995     struct deadline_data *dd = q->elevator->elevator_data;
0996 
0997     seq_printf(m, "%u\n", dd->starved);
0998     return 0;
0999 }
1000 
1001 static int dd_async_depth_show(void *data, struct seq_file *m)
1002 {
1003     struct request_queue *q = data;
1004     struct deadline_data *dd = q->elevator->elevator_data;
1005 
1006     seq_printf(m, "%u\n", dd->async_depth);
1007     return 0;
1008 }
1009 
1010 static int dd_queued_show(void *data, struct seq_file *m)
1011 {
1012     struct request_queue *q = data;
1013     struct deadline_data *dd = q->elevator->elevator_data;
1014     u32 rt, be, idle;
1015 
1016     spin_lock(&dd->lock);
1017     rt = dd_queued(dd, DD_RT_PRIO);
1018     be = dd_queued(dd, DD_BE_PRIO);
1019     idle = dd_queued(dd, DD_IDLE_PRIO);
1020     spin_unlock(&dd->lock);
1021 
1022     seq_printf(m, "%u %u %u\n", rt, be, idle);
1023 
1024     return 0;
1025 }
1026 
1027 /* Number of requests owned by the block driver for a given priority. */
1028 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
1029 {
1030     const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
1031 
1032     lockdep_assert_held(&dd->lock);
1033 
1034     return stats->dispatched + stats->merged -
1035         atomic_read(&stats->completed);
1036 }
1037 
1038 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
1039 {
1040     struct request_queue *q = data;
1041     struct deadline_data *dd = q->elevator->elevator_data;
1042     u32 rt, be, idle;
1043 
1044     spin_lock(&dd->lock);
1045     rt = dd_owned_by_driver(dd, DD_RT_PRIO);
1046     be = dd_owned_by_driver(dd, DD_BE_PRIO);
1047     idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
1048     spin_unlock(&dd->lock);
1049 
1050     seq_printf(m, "%u %u %u\n", rt, be, idle);
1051 
1052     return 0;
1053 }
1054 
1055 #define DEADLINE_DISPATCH_ATTR(prio)                    \
1056 static void *deadline_dispatch##prio##_start(struct seq_file *m,    \
1057                          loff_t *pos)       \
1058     __acquires(&dd->lock)                       \
1059 {                                   \
1060     struct request_queue *q = m->private;               \
1061     struct deadline_data *dd = q->elevator->elevator_data;      \
1062     struct dd_per_prio *per_prio = &dd->per_prio[prio];     \
1063                                     \
1064     spin_lock(&dd->lock);                       \
1065     return seq_list_start(&per_prio->dispatch, *pos);       \
1066 }                                   \
1067                                     \
1068 static void *deadline_dispatch##prio##_next(struct seq_file *m,     \
1069                         void *v, loff_t *pos)   \
1070 {                                   \
1071     struct request_queue *q = m->private;               \
1072     struct deadline_data *dd = q->elevator->elevator_data;      \
1073     struct dd_per_prio *per_prio = &dd->per_prio[prio];     \
1074                                     \
1075     return seq_list_next(v, &per_prio->dispatch, pos);      \
1076 }                                   \
1077                                     \
1078 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1079     __releases(&dd->lock)                       \
1080 {                                   \
1081     struct request_queue *q = m->private;               \
1082     struct deadline_data *dd = q->elevator->elevator_data;      \
1083                                     \
1084     spin_unlock(&dd->lock);                     \
1085 }                                   \
1086                                     \
1087 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1088     .start  = deadline_dispatch##prio##_start,          \
1089     .next   = deadline_dispatch##prio##_next,           \
1090     .stop   = deadline_dispatch##prio##_stop,           \
1091     .show   = blk_mq_debugfs_rq_show,               \
1092 }
1093 
1094 DEADLINE_DISPATCH_ATTR(0);
1095 DEADLINE_DISPATCH_ATTR(1);
1096 DEADLINE_DISPATCH_ATTR(2);
1097 #undef DEADLINE_DISPATCH_ATTR
1098 
1099 #define DEADLINE_QUEUE_DDIR_ATTRS(name)                 \
1100     {#name "_fifo_list", 0400,                  \
1101             .seq_ops = &deadline_##name##_fifo_seq_ops}
1102 #define DEADLINE_NEXT_RQ_ATTR(name)                 \
1103     {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1104 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1105     DEADLINE_QUEUE_DDIR_ATTRS(read0),
1106     DEADLINE_QUEUE_DDIR_ATTRS(write0),
1107     DEADLINE_QUEUE_DDIR_ATTRS(read1),
1108     DEADLINE_QUEUE_DDIR_ATTRS(write1),
1109     DEADLINE_QUEUE_DDIR_ATTRS(read2),
1110     DEADLINE_QUEUE_DDIR_ATTRS(write2),
1111     DEADLINE_NEXT_RQ_ATTR(read0),
1112     DEADLINE_NEXT_RQ_ATTR(write0),
1113     DEADLINE_NEXT_RQ_ATTR(read1),
1114     DEADLINE_NEXT_RQ_ATTR(write1),
1115     DEADLINE_NEXT_RQ_ATTR(read2),
1116     DEADLINE_NEXT_RQ_ATTR(write2),
1117     {"batching", 0400, deadline_batching_show},
1118     {"starved", 0400, deadline_starved_show},
1119     {"async_depth", 0400, dd_async_depth_show},
1120     {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1121     {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1122     {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1123     {"owned_by_driver", 0400, dd_owned_by_driver_show},
1124     {"queued", 0400, dd_queued_show},
1125     {},
1126 };
1127 #undef DEADLINE_QUEUE_DDIR_ATTRS
1128 #endif
1129 
1130 static struct elevator_type mq_deadline = {
1131     .ops = {
1132         .depth_updated      = dd_depth_updated,
1133         .limit_depth        = dd_limit_depth,
1134         .insert_requests    = dd_insert_requests,
1135         .dispatch_request   = dd_dispatch_request,
1136         .prepare_request    = dd_prepare_request,
1137         .finish_request     = dd_finish_request,
1138         .next_request       = elv_rb_latter_request,
1139         .former_request     = elv_rb_former_request,
1140         .bio_merge      = dd_bio_merge,
1141         .request_merge      = dd_request_merge,
1142         .requests_merged    = dd_merged_requests,
1143         .request_merged     = dd_request_merged,
1144         .has_work       = dd_has_work,
1145         .init_sched     = dd_init_sched,
1146         .exit_sched     = dd_exit_sched,
1147         .init_hctx      = dd_init_hctx,
1148     },
1149 
1150 #ifdef CONFIG_BLK_DEBUG_FS
1151     .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1152 #endif
1153     .elevator_attrs = deadline_attrs,
1154     .elevator_name = "mq-deadline",
1155     .elevator_alias = "deadline",
1156     .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1157     .elevator_owner = THIS_MODULE,
1158 };
1159 MODULE_ALIAS("mq-deadline-iosched");
1160 
1161 static int __init deadline_init(void)
1162 {
1163     return elv_register(&mq_deadline);
1164 }
1165 
1166 static void __exit deadline_exit(void)
1167 {
1168     elv_unregister(&mq_deadline);
1169 }
1170 
1171 module_init(deadline_init);
1172 module_exit(deadline_exit);
1173 
1174 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1175 MODULE_LICENSE("GPL");
1176 MODULE_DESCRIPTION("MQ deadline IO scheduler");