0001 /*
0002  * Block multiqueue core code
0003  *
0004  * Copyright (C) 2013-2014 Jens Axboe
0005  * Copyright (C) 2013-2014 Christoph Hellwig
0006  */
0007 #include <linux/kernel.h>
0008 #include <linux/module.h>
0009 #include <linux/backing-dev.h>
0010 #include <linux/bio.h>
0011 #include <linux/blkdev.h>
0012 #include <linux/kmemleak.h>
0013 #include <linux/mm.h>
0014 #include <linux/init.h>
0015 #include <linux/slab.h>
0016 #include <linux/workqueue.h>
0017 #include <linux/smp.h>
0018 #include <linux/llist.h>
0019 #include <linux/list_sort.h>
0020 #include <linux/cpu.h>
0021 #include <linux/cache.h>
0022 #include <linux/sched/sysctl.h>
0023 #include <linux/delay.h>
0024 #include <linux/crash_dump.h>
0025 #include <linux/prefetch.h>
0026 
0027 #include <trace/events/block.h>
0028 
0029 #include <linux/blk-mq.h>
0030 #include "blk.h"
0031 #include "blk-mq.h"
0032 #include "blk-mq-tag.h"
0033 #include "blk-stat.h"
0034 #include "blk-wbt.h"
0035 
0036 static DEFINE_MUTEX(all_q_mutex);
0037 static LIST_HEAD(all_q_list);
0038 
0039 /*
0040  * Check if any of the ctx's have pending work in this hardware queue
0041  */
0042 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
0043 {
0044     return sbitmap_any_bit_set(&hctx->ctx_map);
0045 }
0046 
0047 /*
0048  * Mark this ctx as having pending work in this hardware queue
0049  */
0050 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
0051                      struct blk_mq_ctx *ctx)
0052 {
0053     if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
0054         sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
0055 }
0056 
0057 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
0058                       struct blk_mq_ctx *ctx)
0059 {
0060     sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
0061 }
0062 
0063 void blk_mq_freeze_queue_start(struct request_queue *q)
0064 {
0065     int freeze_depth;
0066 
0067     freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
0068     if (freeze_depth == 1) {
0069         percpu_ref_kill(&q->q_usage_counter);
0070         blk_mq_run_hw_queues(q, false);
0071     }
0072 }
0073 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
0074 
0075 static void blk_mq_freeze_queue_wait(struct request_queue *q)
0076 {
0077     wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
0078 }
0079 
0080 /*
0081  * Guarantee no request is in use, so we can change any data structure of
0082  * the queue afterward.
0083  */
0084 void blk_freeze_queue(struct request_queue *q)
0085 {
0086     /*
0087      * In the !blk_mq case we are only calling this to kill the
0088      * q_usage_counter, otherwise this increases the freeze depth
0089      * and waits for it to return to zero.  For this reason there is
0090      * no blk_unfreeze_queue(), and blk_freeze_queue() is not
0091      * exported to drivers as the only user for unfreeze is blk_mq.
0092      */
0093     blk_mq_freeze_queue_start(q);
0094     blk_mq_freeze_queue_wait(q);
0095 }
0096 
0097 void blk_mq_freeze_queue(struct request_queue *q)
0098 {
0099     /*
0100      * ...just an alias to keep freeze and unfreeze actions balanced
0101      * in the blk_mq_* namespace
0102      */
0103     blk_freeze_queue(q);
0104 }
0105 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
0106 
0107 void blk_mq_unfreeze_queue(struct request_queue *q)
0108 {
0109     int freeze_depth;
0110 
0111     freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
0112     WARN_ON_ONCE(freeze_depth < 0);
0113     if (!freeze_depth) {
0114         percpu_ref_reinit(&q->q_usage_counter);
0115         wake_up_all(&q->mq_freeze_wq);
0116     }
0117 }
0118 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
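
/*
 * Illustrative sketch of the usual freeze/unfreeze pairing: a caller that
 * needs to modify queue state with no requests in flight brackets the
 * update with the two helpers above.  The function name and the "update"
 * step are hypothetical.
 */
static void example_update_queue_state(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* drains and blocks new requests */
	/* ... safely modify queue/tag state here ... */
	blk_mq_unfreeze_queue(q);	/* resumes request processing */
}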
0119 
0120 /**
0121  * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
0122  * @q: request queue.
0123  *
0124  * Note: this function does not prevent the struct request end_io()
0125  * callback from being invoked. Nor does it prevent new queue_rq()
0126  * calls from occurring unless the queue has been stopped first.
0127  */
0128 void blk_mq_quiesce_queue(struct request_queue *q)
0129 {
0130     struct blk_mq_hw_ctx *hctx;
0131     unsigned int i;
0132     bool rcu = false;
0133 
0134     blk_mq_stop_hw_queues(q);
0135 
0136     queue_for_each_hw_ctx(q, hctx, i) {
0137         if (hctx->flags & BLK_MQ_F_BLOCKING)
0138             synchronize_srcu(&hctx->queue_rq_srcu);
0139         else
0140             rcu = true;
0141     }
0142     if (rcu)
0143         synchronize_rcu();
0144 }
0145 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
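
/*
 * Illustrative sketch: in this version blk_mq_quiesce_queue() also stops
 * the hardware queues, so a caller that quiesced the queue around some
 * reconfiguration must restart dispatch afterwards.  The function name and
 * the reconfiguration step are hypothetical.
 */
static void example_reconfigure(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* no queue_rq calls in flight after this */
	/* ... reconfigure driver/hardware state ... */
	blk_mq_start_stopped_hw_queues(q, true);	/* resume dispatch */
}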
0146 
0147 void blk_mq_wake_waiters(struct request_queue *q)
0148 {
0149     struct blk_mq_hw_ctx *hctx;
0150     unsigned int i;
0151 
0152     queue_for_each_hw_ctx(q, hctx, i)
0153         if (blk_mq_hw_queue_mapped(hctx))
0154             blk_mq_tag_wakeup_all(hctx->tags, true);
0155 
0156     /*
0157      * If we are called because the queue has now been marked as
0158      * dying, we need to ensure that processes currently waiting on
0159      * the queue are notified as well.
0160      */
0161     wake_up_all(&q->mq_freeze_wq);
0162 }
0163 
0164 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
0165 {
0166     return blk_mq_has_free_tags(hctx->tags);
0167 }
0168 EXPORT_SYMBOL(blk_mq_can_queue);
0169 
0170 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
0171                    struct request *rq, unsigned int op)
0172 {
0173     INIT_LIST_HEAD(&rq->queuelist);
0174     /* csd/requeue_work/fifo_time is initialized before use */
0175     rq->q = q;
0176     rq->mq_ctx = ctx;
0177     rq->cmd_flags = op;
0178     if (blk_queue_io_stat(q))
0179         rq->rq_flags |= RQF_IO_STAT;
0180     /* do not touch atomic flags, it needs atomic ops against the timer */
0181     rq->cpu = -1;
0182     INIT_HLIST_NODE(&rq->hash);
0183     RB_CLEAR_NODE(&rq->rb_node);
0184     rq->rq_disk = NULL;
0185     rq->part = NULL;
0186     rq->start_time = jiffies;
0187 #ifdef CONFIG_BLK_CGROUP
0188     rq->rl = NULL;
0189     set_start_time_ns(rq);
0190     rq->io_start_time_ns = 0;
0191 #endif
0192     rq->nr_phys_segments = 0;
0193 #if defined(CONFIG_BLK_DEV_INTEGRITY)
0194     rq->nr_integrity_segments = 0;
0195 #endif
0196     rq->special = NULL;
0197     /* tag was already set */
0198     rq->errors = 0;
0199 
0200     rq->cmd = rq->__cmd;
0201 
0202     rq->extra_len = 0;
0203     rq->sense_len = 0;
0204     rq->resid_len = 0;
0205     rq->sense = NULL;
0206 
0207     INIT_LIST_HEAD(&rq->timeout_list);
0208     rq->timeout = 0;
0209 
0210     rq->end_io = NULL;
0211     rq->end_io_data = NULL;
0212     rq->next_rq = NULL;
0213 
0214     ctx->rq_dispatched[op_is_sync(op)]++;
0215 }
0216 
0217 static struct request *
0218 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
0219 {
0220     struct request *rq;
0221     unsigned int tag;
0222 
0223     tag = blk_mq_get_tag(data);
0224     if (tag != BLK_MQ_TAG_FAIL) {
0225         rq = data->hctx->tags->rqs[tag];
0226 
0227         if (blk_mq_tag_busy(data->hctx)) {
0228             rq->rq_flags = RQF_MQ_INFLIGHT;
0229             atomic_inc(&data->hctx->nr_active);
0230         }
0231 
0232         rq->tag = tag;
0233         blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
0234         return rq;
0235     }
0236 
0237     return NULL;
0238 }
0239 
0240 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
0241         unsigned int flags)
0242 {
0243     struct blk_mq_ctx *ctx;
0244     struct blk_mq_hw_ctx *hctx;
0245     struct request *rq;
0246     struct blk_mq_alloc_data alloc_data;
0247     int ret;
0248 
0249     ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
0250     if (ret)
0251         return ERR_PTR(ret);
0252 
0253     ctx = blk_mq_get_ctx(q);
0254     hctx = blk_mq_map_queue(q, ctx->cpu);
0255     blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
0256     rq = __blk_mq_alloc_request(&alloc_data, rw);
0257     blk_mq_put_ctx(ctx);
0258 
0259     if (!rq) {
0260         blk_queue_exit(q);
0261         return ERR_PTR(-EWOULDBLOCK);
0262     }
0263 
0264     rq->__data_len = 0;
0265     rq->__sector = (sector_t) -1;
0266     rq->bio = rq->biotail = NULL;
0267     return rq;
0268 }
0269 EXPORT_SYMBOL(blk_mq_alloc_request);
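
/*
 * Illustrative sketch of the allocation API above: a passthrough-style
 * caller allocates a request without sleeping and must check the ERR_PTR
 * return.  The function name is hypothetical; REQ_OP_WRITE is just an
 * example op.
 */
static int example_alloc_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... set up and issue the request, or simply drop it again ... */
	blk_mq_free_request(rq);
	return 0;
}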
0270 
0271 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
0272         unsigned int flags, unsigned int hctx_idx)
0273 {
0274     struct blk_mq_hw_ctx *hctx;
0275     struct blk_mq_ctx *ctx;
0276     struct request *rq;
0277     struct blk_mq_alloc_data alloc_data;
0278     int ret;
0279 
0280     /*
0281      * If the tag allocator sleeps we could get an allocation for a
0282      * different hardware context.  No need to complicate the low level
0283      * allocator for this for the rare use case of a command tied to
0284      * a specific queue.
0285      */
0286     if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
0287         return ERR_PTR(-EINVAL);
0288 
0289     if (hctx_idx >= q->nr_hw_queues)
0290         return ERR_PTR(-EIO);
0291 
0292     ret = blk_queue_enter(q, true);
0293     if (ret)
0294         return ERR_PTR(ret);
0295 
0296     /*
0297      * Check if the hardware context is actually mapped to anything.
0298      * If not tell the caller that it should skip this queue.
0299      */
0300     hctx = q->queue_hw_ctx[hctx_idx];
0301     if (!blk_mq_hw_queue_mapped(hctx)) {
0302         ret = -EXDEV;
0303         goto out_queue_exit;
0304     }
0305     ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
0306 
0307     blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
0308     rq = __blk_mq_alloc_request(&alloc_data, rw);
0309     if (!rq) {
0310         ret = -EWOULDBLOCK;
0311         goto out_queue_exit;
0312     }
0313 
0314     return rq;
0315 
0316 out_queue_exit:
0317     blk_queue_exit(q);
0318     return ERR_PTR(ret);
0319 }
0320 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
0321 
0322 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
0323                   struct blk_mq_ctx *ctx, struct request *rq)
0324 {
0325     const int tag = rq->tag;
0326     struct request_queue *q = rq->q;
0327 
0328     if (rq->rq_flags & RQF_MQ_INFLIGHT)
0329         atomic_dec(&hctx->nr_active);
0330 
0331     wbt_done(q->rq_wb, &rq->issue_stat);
0332     rq->rq_flags = 0;
0333 
0334     clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
0335     clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
0336     blk_mq_put_tag(hctx, ctx, tag);
0337     blk_queue_exit(q);
0338 }
0339 
0340 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
0341 {
0342     struct blk_mq_ctx *ctx = rq->mq_ctx;
0343 
0344     ctx->rq_completed[rq_is_sync(rq)]++;
0345     __blk_mq_free_request(hctx, ctx, rq);
0346 
0347 }
0348 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
0349 
0350 void blk_mq_free_request(struct request *rq)
0351 {
0352     blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
0353 }
0354 EXPORT_SYMBOL_GPL(blk_mq_free_request);
0355 
0356 inline void __blk_mq_end_request(struct request *rq, int error)
0357 {
0358     blk_account_io_done(rq);
0359 
0360     if (rq->end_io) {
0361         wbt_done(rq->q->rq_wb, &rq->issue_stat);
0362         rq->end_io(rq, error);
0363     } else {
0364         if (unlikely(blk_bidi_rq(rq)))
0365             blk_mq_free_request(rq->next_rq);
0366         blk_mq_free_request(rq);
0367     }
0368 }
0369 EXPORT_SYMBOL(__blk_mq_end_request);
0370 
0371 void blk_mq_end_request(struct request *rq, int error)
0372 {
0373     if (blk_update_request(rq, error, blk_rq_bytes(rq)))
0374         BUG();
0375     __blk_mq_end_request(rq, error);
0376 }
0377 EXPORT_SYMBOL(blk_mq_end_request);
0378 
0379 static void __blk_mq_complete_request_remote(void *data)
0380 {
0381     struct request *rq = data;
0382 
0383     rq->q->softirq_done_fn(rq);
0384 }
0385 
0386 static void blk_mq_ipi_complete_request(struct request *rq)
0387 {
0388     struct blk_mq_ctx *ctx = rq->mq_ctx;
0389     bool shared = false;
0390     int cpu;
0391 
0392     if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
0393         rq->q->softirq_done_fn(rq);
0394         return;
0395     }
0396 
0397     cpu = get_cpu();
0398     if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
0399         shared = cpus_share_cache(cpu, ctx->cpu);
0400 
0401     if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
0402         rq->csd.func = __blk_mq_complete_request_remote;
0403         rq->csd.info = rq;
0404         rq->csd.flags = 0;
0405         smp_call_function_single_async(ctx->cpu, &rq->csd);
0406     } else {
0407         rq->q->softirq_done_fn(rq);
0408     }
0409     put_cpu();
0410 }
0411 
0412 static void blk_mq_stat_add(struct request *rq)
0413 {
0414     if (rq->rq_flags & RQF_STATS) {
0415         /*
0416          * We could use rq->mq_ctx here, but there's less of a risk
0417          * of races if we have the completion event add the stats
0418          * to the local software queue.
0419          */
0420         struct blk_mq_ctx *ctx;
0421 
0422         ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
0423         blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
0424     }
0425 }
0426 
0427 static void __blk_mq_complete_request(struct request *rq)
0428 {
0429     struct request_queue *q = rq->q;
0430 
0431     blk_mq_stat_add(rq);
0432 
0433     if (!q->softirq_done_fn)
0434         blk_mq_end_request(rq, rq->errors);
0435     else
0436         blk_mq_ipi_complete_request(rq);
0437 }
0438 
0439 /**
0440  * blk_mq_complete_request - end I/O on a request
0441  * @rq:     the request being processed
0442  *
0443  * Description:
0444  *  Ends all I/O on a request. It does not handle partial completions.
0445  *  The actual completion happens out-of-order, through an IPI handler.
0446  **/
0447 void blk_mq_complete_request(struct request *rq, int error)
0448 {
0449     struct request_queue *q = rq->q;
0450 
0451     if (unlikely(blk_should_fake_timeout(q)))
0452         return;
0453     if (!blk_mark_rq_complete(rq)) {
0454         rq->errors = error;
0455         __blk_mq_complete_request(rq);
0456     }
0457 }
0458 EXPORT_SYMBOL(blk_mq_complete_request);
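
/*
 * Illustrative sketch of the completion flow described above: a driver's
 * interrupt path calls blk_mq_complete_request(), and its ->complete
 * handler (installed as q->softirq_done_fn) then ends the request,
 * possibly on the submitting CPU via IPI.  Function names are hypothetical.
 */
static void example_complete(struct request *rq)
{
	/* installed via blk_mq_ops->complete */
	blk_mq_end_request(rq, rq->errors);
}

static void example_irq_done(struct request *rq, int error)
{
	/* called from the driver's hard irq handler */
	blk_mq_complete_request(rq, error);
}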
0459 
0460 int blk_mq_request_started(struct request *rq)
0461 {
0462     return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
0463 }
0464 EXPORT_SYMBOL_GPL(blk_mq_request_started);
0465 
0466 void blk_mq_start_request(struct request *rq)
0467 {
0468     struct request_queue *q = rq->q;
0469 
0470     trace_block_rq_issue(q, rq);
0471 
0472     rq->resid_len = blk_rq_bytes(rq);
0473     if (unlikely(blk_bidi_rq(rq)))
0474         rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
0475 
0476     if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
0477         blk_stat_set_issue_time(&rq->issue_stat);
0478         rq->rq_flags |= RQF_STATS;
0479         wbt_issue(q->rq_wb, &rq->issue_stat);
0480     }
0481 
0482     blk_add_timer(rq);
0483 
0484     /*
0485      * Ensure that ->deadline is visible before we set the started
0486      * flag and clear the completed flag.
0487      */
0488     smp_mb__before_atomic();
0489 
0490     /*
0491      * Mark us as started and clear complete. Complete might have been
0492      * set if requeue raced with timeout, which then marked it as
0493      * complete. So be sure to clear complete again when we start
0494      * the request, otherwise we'll ignore the completion event.
0495      */
0496     if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
0497         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
0498     if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
0499         clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
0500 
0501     if (q->dma_drain_size && blk_rq_bytes(rq)) {
0502         /*
0503          * Make sure space for the drain appears.  We know we can do
0504          * this because max_hw_segments has been adjusted to be one
0505          * fewer than the device can handle.
0506          */
0507         rq->nr_phys_segments++;
0508     }
0509 }
0510 EXPORT_SYMBOL(blk_mq_start_request);
0511 
0512 static void __blk_mq_requeue_request(struct request *rq)
0513 {
0514     struct request_queue *q = rq->q;
0515 
0516     trace_block_rq_requeue(q, rq);
0517     wbt_requeue(q->rq_wb, &rq->issue_stat);
0518 
0519     if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
0520         if (q->dma_drain_size && blk_rq_bytes(rq))
0521             rq->nr_phys_segments--;
0522     }
0523 }
0524 
0525 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
0526 {
0527     __blk_mq_requeue_request(rq);
0528 
0529     BUG_ON(blk_queued_rq(rq));
0530     blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
0531 }
0532 EXPORT_SYMBOL(blk_mq_requeue_request);
0533 
0534 static void blk_mq_requeue_work(struct work_struct *work)
0535 {
0536     struct request_queue *q =
0537         container_of(work, struct request_queue, requeue_work.work);
0538     LIST_HEAD(rq_list);
0539     struct request *rq, *next;
0540     unsigned long flags;
0541 
0542     spin_lock_irqsave(&q->requeue_lock, flags);
0543     list_splice_init(&q->requeue_list, &rq_list);
0544     spin_unlock_irqrestore(&q->requeue_lock, flags);
0545 
0546     list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
0547         if (!(rq->rq_flags & RQF_SOFTBARRIER))
0548             continue;
0549 
0550         rq->rq_flags &= ~RQF_SOFTBARRIER;
0551         list_del_init(&rq->queuelist);
0552         blk_mq_insert_request(rq, true, false, false);
0553     }
0554 
0555     while (!list_empty(&rq_list)) {
0556         rq = list_entry(rq_list.next, struct request, queuelist);
0557         list_del_init(&rq->queuelist);
0558         blk_mq_insert_request(rq, false, false, false);
0559     }
0560 
0561     blk_mq_run_hw_queues(q, false);
0562 }
0563 
0564 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
0565                 bool kick_requeue_list)
0566 {
0567     struct request_queue *q = rq->q;
0568     unsigned long flags;
0569 
0570     /*
0571      * We abuse this flag that is otherwise used by the I/O scheduler to
0572      * request head insertion from the workqueue.
0573      */
0574     BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
0575 
0576     spin_lock_irqsave(&q->requeue_lock, flags);
0577     if (at_head) {
0578         rq->rq_flags |= RQF_SOFTBARRIER;
0579         list_add(&rq->queuelist, &q->requeue_list);
0580     } else {
0581         list_add_tail(&rq->queuelist, &q->requeue_list);
0582     }
0583     spin_unlock_irqrestore(&q->requeue_lock, flags);
0584 
0585     if (kick_requeue_list)
0586         blk_mq_kick_requeue_list(q);
0587 }
0588 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
0589 
0590 void blk_mq_kick_requeue_list(struct request_queue *q)
0591 {
0592     kblockd_schedule_delayed_work(&q->requeue_work, 0);
0593 }
0594 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
0595 
0596 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
0597                     unsigned long msecs)
0598 {
0599     kblockd_schedule_delayed_work(&q->requeue_work,
0600                       msecs_to_jiffies(msecs));
0601 }
0602 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
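
/*
 * Illustrative sketch: on a transient resource shortage a driver can hand a
 * started request back to the requeue machinery above and ask for the list
 * to be processed again after a delay.  The function name and the 100 msec
 * value are hypothetical.
 */
static void example_requeue_later(struct request *rq)
{
	blk_mq_requeue_request(rq, false);		/* don't kick the list yet */
	blk_mq_delay_kick_requeue_list(rq->q, 100);	/* retry in ~100 msecs */
}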
0603 
0604 void blk_mq_abort_requeue_list(struct request_queue *q)
0605 {
0606     unsigned long flags;
0607     LIST_HEAD(rq_list);
0608 
0609     spin_lock_irqsave(&q->requeue_lock, flags);
0610     list_splice_init(&q->requeue_list, &rq_list);
0611     spin_unlock_irqrestore(&q->requeue_lock, flags);
0612 
0613     while (!list_empty(&rq_list)) {
0614         struct request *rq;
0615 
0616         rq = list_first_entry(&rq_list, struct request, queuelist);
0617         list_del_init(&rq->queuelist);
0618         rq->errors = -EIO;
0619         blk_mq_end_request(rq, rq->errors);
0620     }
0621 }
0622 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
0623 
0624 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
0625 {
0626     if (tag < tags->nr_tags) {
0627         prefetch(tags->rqs[tag]);
0628         return tags->rqs[tag];
0629     }
0630 
0631     return NULL;
0632 }
0633 EXPORT_SYMBOL(blk_mq_tag_to_rq);
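
/*
 * Illustrative sketch: a driver whose hardware reports completions by tag
 * can translate the tag back to the request with blk_mq_tag_to_rq() and
 * complete it.  The function name is hypothetical.
 */
static void example_complete_by_tag(struct blk_mq_hw_ctx *hctx,
				    unsigned int tag)
{
	struct request *rq = blk_mq_tag_to_rq(hctx->tags, tag);

	if (rq)
		blk_mq_complete_request(rq, 0);
}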
0634 
0635 struct blk_mq_timeout_data {
0636     unsigned long next;
0637     unsigned int next_set;
0638 };
0639 
0640 void blk_mq_rq_timed_out(struct request *req, bool reserved)
0641 {
0642     struct blk_mq_ops *ops = req->q->mq_ops;
0643     enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
0644 
0645     /*
0646      * We know that complete is set at this point. If STARTED isn't set
0647      * anymore, then the request isn't active and the "timeout" should
0648      * just be ignored. This can happen due to the bitflag ordering.
0649      * Timeout first checks if STARTED is set, and if it is, assumes
0650      * the request is active. But if we race with completion, then
0651      * both flags will get cleared. So check here again, and ignore
0652      * a timeout event with a request that isn't active.
0653      */
0654     if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
0655         return;
0656 
0657     if (ops->timeout)
0658         ret = ops->timeout(req, reserved);
0659 
0660     switch (ret) {
0661     case BLK_EH_HANDLED:
0662         __blk_mq_complete_request(req);
0663         break;
0664     case BLK_EH_RESET_TIMER:
0665         blk_add_timer(req);
0666         blk_clear_rq_complete(req);
0667         break;
0668     case BLK_EH_NOT_HANDLED:
0669         break;
0670     default:
0671         printk(KERN_ERR "block: bad eh return: %d\n", ret);
0672         break;
0673     }
0674 }
0675 
0676 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
0677         struct request *rq, void *priv, bool reserved)
0678 {
0679     struct blk_mq_timeout_data *data = priv;
0680 
0681     if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
0682         /*
0683          * If a request wasn't started before the queue was
0684          * marked dying, kill it here or it'll go unnoticed.
0685          */
0686         if (unlikely(blk_queue_dying(rq->q))) {
0687             rq->errors = -EIO;
0688             blk_mq_end_request(rq, rq->errors);
0689         }
0690         return;
0691     }
0692 
0693     if (time_after_eq(jiffies, rq->deadline)) {
0694         if (!blk_mark_rq_complete(rq))
0695             blk_mq_rq_timed_out(rq, reserved);
0696     } else if (!data->next_set || time_after(data->next, rq->deadline)) {
0697         data->next = rq->deadline;
0698         data->next_set = 1;
0699     }
0700 }
0701 
0702 static void blk_mq_timeout_work(struct work_struct *work)
0703 {
0704     struct request_queue *q =
0705         container_of(work, struct request_queue, timeout_work);
0706     struct blk_mq_timeout_data data = {
0707         .next       = 0,
0708         .next_set   = 0,
0709     };
0710     int i;
0711 
0712     /* A deadlock might occur if a request is stuck requiring a
0713      * timeout at the same time a queue freeze is waiting
0714      * for completion, since the timeout code would not be able to
0715      * acquire the queue reference here.
0716      *
0717      * That's why we don't use blk_queue_enter here; instead, we use
0718      * percpu_ref_tryget directly, because we need to be able to
0719      * obtain a reference even in the short window between the queue
0720      * starting to freeze, by dropping the first reference in
0721      * blk_mq_freeze_queue_start, and the moment the last request is
0722      * consumed, marked by the instant q_usage_counter reaches
0723      * zero.
0724      */
0725     if (!percpu_ref_tryget(&q->q_usage_counter))
0726         return;
0727 
0728     blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
0729 
0730     if (data.next_set) {
0731         data.next = blk_rq_timeout(round_jiffies_up(data.next));
0732         mod_timer(&q->timeout, data.next);
0733     } else {
0734         struct blk_mq_hw_ctx *hctx;
0735 
0736         queue_for_each_hw_ctx(q, hctx, i) {
0737             /* the hctx may be unmapped, so check it here */
0738             if (blk_mq_hw_queue_mapped(hctx))
0739                 blk_mq_tag_idle(hctx);
0740         }
0741     }
0742     blk_queue_exit(q);
0743 }
0744 
0745 /*
0746  * Reverse check our software queue for entries that we could potentially
0747  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
0748  * too much time checking for merges.
0749  */
0750 static bool blk_mq_attempt_merge(struct request_queue *q,
0751                  struct blk_mq_ctx *ctx, struct bio *bio)
0752 {
0753     struct request *rq;
0754     int checked = 8;
0755 
0756     list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
0757         int el_ret;
0758 
0759         if (!checked--)
0760             break;
0761 
0762         if (!blk_rq_merge_ok(rq, bio))
0763             continue;
0764 
0765         el_ret = blk_try_merge(rq, bio);
0766         if (el_ret == ELEVATOR_BACK_MERGE) {
0767             if (bio_attempt_back_merge(q, rq, bio)) {
0768                 ctx->rq_merged++;
0769                 return true;
0770             }
0771             break;
0772         } else if (el_ret == ELEVATOR_FRONT_MERGE) {
0773             if (bio_attempt_front_merge(q, rq, bio)) {
0774                 ctx->rq_merged++;
0775                 return true;
0776             }
0777             break;
0778         }
0779     }
0780 
0781     return false;
0782 }
0783 
0784 struct flush_busy_ctx_data {
0785     struct blk_mq_hw_ctx *hctx;
0786     struct list_head *list;
0787 };
0788 
0789 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
0790 {
0791     struct flush_busy_ctx_data *flush_data = data;
0792     struct blk_mq_hw_ctx *hctx = flush_data->hctx;
0793     struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
0794 
0795     sbitmap_clear_bit(sb, bitnr);
0796     spin_lock(&ctx->lock);
0797     list_splice_tail_init(&ctx->rq_list, flush_data->list);
0798     spin_unlock(&ctx->lock);
0799     return true;
0800 }
0801 
0802 /*
0803  * Process software queues that have been marked busy, splicing them
0804  * to the for-dispatch list.
0805  */
0806 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
0807 {
0808     struct flush_busy_ctx_data data = {
0809         .hctx = hctx,
0810         .list = list,
0811     };
0812 
0813     sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
0814 }
0815 
0816 static inline unsigned int queued_to_index(unsigned int queued)
0817 {
0818     if (!queued)
0819         return 0;
0820 
0821     return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
0822 }
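
/*
 * For reference, queued_to_index() buckets the per-run dispatch count
 * logarithmically: 0 -> 0, 1 -> 1, 2..3 -> 2, 4..7 -> 3, and so on, capped
 * at BLK_MQ_MAX_DISPATCH_ORDER - 1; hctx->dispatched[] in
 * blk_mq_dispatch_rq_list() below is indexed with this value.
 */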
0823 
0824 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
0825 {
0826     struct request_queue *q = hctx->queue;
0827     struct request *rq;
0828     LIST_HEAD(driver_list);
0829     struct list_head *dptr;
0830     int queued, ret = BLK_MQ_RQ_QUEUE_OK;
0831 
0832     /*
0833      * Start off with dptr being NULL, so we start the first request
0834      * immediately, even if we have more pending.
0835      */
0836     dptr = NULL;
0837 
0838     /*
0839      * Now process all the entries, sending them to the driver.
0840      */
0841     queued = 0;
0842     while (!list_empty(list)) {
0843         struct blk_mq_queue_data bd;
0844 
0845         rq = list_first_entry(list, struct request, queuelist);
0846         list_del_init(&rq->queuelist);
0847 
0848         bd.rq = rq;
0849         bd.list = dptr;
0850         bd.last = list_empty(list);
0851 
0852         ret = q->mq_ops->queue_rq(hctx, &bd);
0853         switch (ret) {
0854         case BLK_MQ_RQ_QUEUE_OK:
0855             queued++;
0856             break;
0857         case BLK_MQ_RQ_QUEUE_BUSY:
0858             list_add(&rq->queuelist, list);
0859             __blk_mq_requeue_request(rq);
0860             break;
0861         default:
0862             pr_err("blk-mq: bad return on queue: %d\n", ret);
0863         case BLK_MQ_RQ_QUEUE_ERROR:
0864             rq->errors = -EIO;
0865             blk_mq_end_request(rq, rq->errors);
0866             break;
0867         }
0868 
0869         if (ret == BLK_MQ_RQ_QUEUE_BUSY)
0870             break;
0871 
0872         /*
0873          * We've done the first request. If we have more than 1
0874          * left in the list, set dptr to defer issue.
0875          */
0876         if (!dptr && list->next != list->prev)
0877             dptr = &driver_list;
0878     }
0879 
0880     hctx->dispatched[queued_to_index(queued)]++;
0881 
0882     /*
0883      * Any items that need requeuing? Stuff them into hctx->dispatch,
0884      * that is where we will continue on next queue run.
0885      */
0886     if (!list_empty(list)) {
0887         spin_lock(&hctx->lock);
0888         list_splice(list, &hctx->dispatch);
0889         spin_unlock(&hctx->lock);
0890 
0891         /*
0892          * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
0893          * it's possible the queue is stopped and restarted again
0894          * before this. Queue restart will dispatch requests. And since
0895          * requests in rq_list aren't added into hctx->dispatch yet,
0896          * the requests in rq_list might get lost.
0897          *
0898          * blk_mq_run_hw_queue() already checks the STOPPED bit
0899          **/
0900         blk_mq_run_hw_queue(hctx, true);
0901     }
0902 
0903     return ret != BLK_MQ_RQ_QUEUE_BUSY;
0904 }
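
/*
 * Illustrative sketch of the ->queue_rq() side of the contract above: a
 * driver that returns BLK_MQ_RQ_QUEUE_BUSY is expected to stop the hw queue
 * and restart it once resources free up, so that the requests parked on
 * hctx->dispatch are retried.  "struct example_hw" and the free_slots check
 * are hypothetical driver state.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct example_hw { unsigned int free_slots; } *hw = hctx->driver_data;

	if (!hw->free_slots) {
		blk_mq_stop_hw_queue(hctx);
		return BLK_MQ_RQ_QUEUE_BUSY;	/* core re-queues bd->rq */
	}

	blk_mq_start_request(bd->rq);
	/* ... hand bd->rq to the hardware ... */
	return BLK_MQ_RQ_QUEUE_OK;
}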
0905 
0906 /*
0907  * Run this hardware queue, pulling any software queues mapped to it in.
0908  * Note that this function currently has various problems around ordering
0909  * of IO. In particular, we'd like FIFO behaviour on handling existing
0910  * items on the hctx->dispatch list. Ignore that for now.
0911  */
0912 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
0913 {
0914     LIST_HEAD(rq_list);
0915 
0916     if (unlikely(blk_mq_hctx_stopped(hctx)))
0917         return;
0918 
0919     hctx->run++;
0920 
0921     /*
0922      * Touch any software queue that has pending entries.
0923      */
0924     flush_busy_ctxs(hctx, &rq_list);
0925 
0926     /*
0927      * If we have previous entries on our dispatch list, grab them
0928      * and stuff them at the front for more fair dispatch.
0929      */
0930     if (!list_empty_careful(&hctx->dispatch)) {
0931         spin_lock(&hctx->lock);
0932         if (!list_empty(&hctx->dispatch))
0933             list_splice_init(&hctx->dispatch, &rq_list);
0934         spin_unlock(&hctx->lock);
0935     }
0936 
0937     blk_mq_dispatch_rq_list(hctx, &rq_list);
0938 }
0939 
0940 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
0941 {
0942     int srcu_idx;
0943 
0944     WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
0945         cpu_online(hctx->next_cpu));
0946 
0947     if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
0948         rcu_read_lock();
0949         blk_mq_process_rq_list(hctx);
0950         rcu_read_unlock();
0951     } else {
0952         srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
0953         blk_mq_process_rq_list(hctx);
0954         srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
0955     }
0956 }
0957 
0958 /*
0959  * It'd be great if the workqueue API had a way to pass
0960  * in a mask and had some smarts for more clever placement.
0961  * For now we just round-robin here, switching for every
0962  * BLK_MQ_CPU_WORK_BATCH queued items.
0963  */
0964 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
0965 {
0966     if (hctx->queue->nr_hw_queues == 1)
0967         return WORK_CPU_UNBOUND;
0968 
0969     if (--hctx->next_cpu_batch <= 0) {
0970         int next_cpu;
0971 
0972         next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
0973         if (next_cpu >= nr_cpu_ids)
0974             next_cpu = cpumask_first(hctx->cpumask);
0975 
0976         hctx->next_cpu = next_cpu;
0977         hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
0978     }
0979 
0980     return hctx->next_cpu;
0981 }
0982 
0983 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
0984 {
0985     if (unlikely(blk_mq_hctx_stopped(hctx) ||
0986              !blk_mq_hw_queue_mapped(hctx)))
0987         return;
0988 
0989     if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
0990         int cpu = get_cpu();
0991         if (cpumask_test_cpu(cpu, hctx->cpumask)) {
0992             __blk_mq_run_hw_queue(hctx);
0993             put_cpu();
0994             return;
0995         }
0996 
0997         put_cpu();
0998     }
0999 
1000     kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
1001 }
1002 
1003 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1004 {
1005     struct blk_mq_hw_ctx *hctx;
1006     int i;
1007 
1008     queue_for_each_hw_ctx(q, hctx, i) {
1009         if ((!blk_mq_hctx_has_pending(hctx) &&
1010             list_empty_careful(&hctx->dispatch)) ||
1011             blk_mq_hctx_stopped(hctx))
1012             continue;
1013 
1014         blk_mq_run_hw_queue(hctx, async);
1015     }
1016 }
1017 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1018 
1019 /**
1020  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1021  * @q: request queue.
1022  *
1023  * The caller is responsible for serializing this function against
1024  * blk_mq_{start,stop}_hw_queue().
1025  */
1026 bool blk_mq_queue_stopped(struct request_queue *q)
1027 {
1028     struct blk_mq_hw_ctx *hctx;
1029     int i;
1030 
1031     queue_for_each_hw_ctx(q, hctx, i)
1032         if (blk_mq_hctx_stopped(hctx))
1033             return true;
1034 
1035     return false;
1036 }
1037 EXPORT_SYMBOL(blk_mq_queue_stopped);
1038 
1039 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1040 {
1041     cancel_work(&hctx->run_work);
1042     cancel_delayed_work(&hctx->delay_work);
1043     set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1044 }
1045 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1046 
1047 void blk_mq_stop_hw_queues(struct request_queue *q)
1048 {
1049     struct blk_mq_hw_ctx *hctx;
1050     int i;
1051 
1052     queue_for_each_hw_ctx(q, hctx, i)
1053         blk_mq_stop_hw_queue(hctx);
1054 }
1055 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1056 
1057 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1058 {
1059     clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1060 
1061     blk_mq_run_hw_queue(hctx, false);
1062 }
1063 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1064 
1065 void blk_mq_start_hw_queues(struct request_queue *q)
1066 {
1067     struct blk_mq_hw_ctx *hctx;
1068     int i;
1069 
1070     queue_for_each_hw_ctx(q, hctx, i)
1071         blk_mq_start_hw_queue(hctx);
1072 }
1073 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1074 
1075 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1076 {
1077     if (!blk_mq_hctx_stopped(hctx))
1078         return;
1079 
1080     clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1081     blk_mq_run_hw_queue(hctx, async);
1082 }
1083 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1084 
1085 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1086 {
1087     struct blk_mq_hw_ctx *hctx;
1088     int i;
1089 
1090     queue_for_each_hw_ctx(q, hctx, i)
1091         blk_mq_start_stopped_hw_queue(hctx, async);
1092 }
1093 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1094 
1095 static void blk_mq_run_work_fn(struct work_struct *work)
1096 {
1097     struct blk_mq_hw_ctx *hctx;
1098 
1099     hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1100 
1101     __blk_mq_run_hw_queue(hctx);
1102 }
1103 
1104 static void blk_mq_delay_work_fn(struct work_struct *work)
1105 {
1106     struct blk_mq_hw_ctx *hctx;
1107 
1108     hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1109 
1110     if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1111         __blk_mq_run_hw_queue(hctx);
1112 }
1113 
1114 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1115 {
1116     if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1117         return;
1118 
1119     kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1120             &hctx->delay_work, msecs_to_jiffies(msecs));
1121 }
1122 EXPORT_SYMBOL(blk_mq_delay_queue);
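
/*
 * Illustrative sketch: a driver that stopped a hardware queue (for example
 * after returning BLK_MQ_RQ_QUEUE_BUSY) can re-arm it from its completion
 * path with blk_mq_start_stopped_hw_queues(), or poll again after a delay
 * as shown here.  The function name and the 100 msec value are
 * hypothetical.
 */
static void example_hw_resources_low(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_stop_hw_queue(hctx);
	blk_mq_delay_queue(hctx, 100);	/* run the stopped queue again later */
}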
1123 
1124 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1125                         struct request *rq,
1126                         bool at_head)
1127 {
1128     struct blk_mq_ctx *ctx = rq->mq_ctx;
1129 
1130     trace_block_rq_insert(hctx->queue, rq);
1131 
1132     if (at_head)
1133         list_add(&rq->queuelist, &ctx->rq_list);
1134     else
1135         list_add_tail(&rq->queuelist, &ctx->rq_list);
1136 }
1137 
1138 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1139                     struct request *rq, bool at_head)
1140 {
1141     struct blk_mq_ctx *ctx = rq->mq_ctx;
1142 
1143     __blk_mq_insert_req_list(hctx, rq, at_head);
1144     blk_mq_hctx_mark_pending(hctx, ctx);
1145 }
1146 
1147 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1148                bool async)
1149 {
1150     struct blk_mq_ctx *ctx = rq->mq_ctx;
1151     struct request_queue *q = rq->q;
1152     struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1153 
1154     spin_lock(&ctx->lock);
1155     __blk_mq_insert_request(hctx, rq, at_head);
1156     spin_unlock(&ctx->lock);
1157 
1158     if (run_queue)
1159         blk_mq_run_hw_queue(hctx, async);
1160 }
1161 
1162 static void blk_mq_insert_requests(struct request_queue *q,
1163                      struct blk_mq_ctx *ctx,
1164                      struct list_head *list,
1165                      int depth,
1166                      bool from_schedule)
1167 
1168 {
1169     struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1170 
1171     trace_block_unplug(q, depth, !from_schedule);
1172 
1173     /*
1174      * preemption doesn't flush plug list, so it's possible ctx->cpu is
1175      * offline now
1176      */
1177     spin_lock(&ctx->lock);
1178     while (!list_empty(list)) {
1179         struct request *rq;
1180 
1181         rq = list_first_entry(list, struct request, queuelist);
1182         BUG_ON(rq->mq_ctx != ctx);
1183         list_del_init(&rq->queuelist);
1184         __blk_mq_insert_req_list(hctx, rq, false);
1185     }
1186     blk_mq_hctx_mark_pending(hctx, ctx);
1187     spin_unlock(&ctx->lock);
1188 
1189     blk_mq_run_hw_queue(hctx, from_schedule);
1190 }
1191 
1192 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1193 {
1194     struct request *rqa = container_of(a, struct request, queuelist);
1195     struct request *rqb = container_of(b, struct request, queuelist);
1196 
1197     return !(rqa->mq_ctx < rqb->mq_ctx ||
1198          (rqa->mq_ctx == rqb->mq_ctx &&
1199           blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1200 }
1201 
1202 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1203 {
1204     struct blk_mq_ctx *this_ctx;
1205     struct request_queue *this_q;
1206     struct request *rq;
1207     LIST_HEAD(list);
1208     LIST_HEAD(ctx_list);
1209     unsigned int depth;
1210 
1211     list_splice_init(&plug->mq_list, &list);
1212 
1213     list_sort(NULL, &list, plug_ctx_cmp);
1214 
1215     this_q = NULL;
1216     this_ctx = NULL;
1217     depth = 0;
1218 
1219     while (!list_empty(&list)) {
1220         rq = list_entry_rq(list.next);
1221         list_del_init(&rq->queuelist);
1222         BUG_ON(!rq->q);
1223         if (rq->mq_ctx != this_ctx) {
1224             if (this_ctx) {
1225                 blk_mq_insert_requests(this_q, this_ctx,
1226                             &ctx_list, depth,
1227                             from_schedule);
1228             }
1229 
1230             this_ctx = rq->mq_ctx;
1231             this_q = rq->q;
1232             depth = 0;
1233         }
1234 
1235         depth++;
1236         list_add_tail(&rq->queuelist, &ctx_list);
1237     }
1238 
1239     /*
1240      * If 'this_ctx' is set, we know we have entries to complete
1241      * on 'ctx_list'. Do those.
1242      */
1243     if (this_ctx) {
1244         blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1245                        from_schedule);
1246     }
1247 }
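
/*
 * Illustrative sketch of the submission side that feeds the plug list
 * handled above: batching bios under a plug lets blk_mq_flush_plug_list()
 * sort them and insert them per software queue in one go.  The function
 * name is hypothetical.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);	/* ends up in blk_mq_flush_plug_list() */
}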
1248 
1249 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1250 {
1251     init_request_from_bio(rq, bio);
1252 
1253     blk_account_io_start(rq, true);
1254 }
1255 
1256 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1257 {
1258     return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1259         !blk_queue_nomerges(hctx->queue);
1260 }
1261 
1262 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1263                      struct blk_mq_ctx *ctx,
1264                      struct request *rq, struct bio *bio)
1265 {
1266     if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1267         blk_mq_bio_to_request(rq, bio);
1268         spin_lock(&ctx->lock);
1269 insert_rq:
1270         __blk_mq_insert_request(hctx, rq, false);
1271         spin_unlock(&ctx->lock);
1272         return false;
1273     } else {
1274         struct request_queue *q = hctx->queue;
1275 
1276         spin_lock(&ctx->lock);
1277         if (!blk_mq_attempt_merge(q, ctx, bio)) {
1278             blk_mq_bio_to_request(rq, bio);
1279             goto insert_rq;
1280         }
1281 
1282         spin_unlock(&ctx->lock);
1283         __blk_mq_free_request(hctx, ctx, rq);
1284         return true;
1285     }
1286 }
1287 
1288 static struct request *blk_mq_map_request(struct request_queue *q,
1289                       struct bio *bio,
1290                       struct blk_mq_alloc_data *data)
1291 {
1292     struct blk_mq_hw_ctx *hctx;
1293     struct blk_mq_ctx *ctx;
1294     struct request *rq;
1295 
1296     blk_queue_enter_live(q);
1297     ctx = blk_mq_get_ctx(q);
1298     hctx = blk_mq_map_queue(q, ctx->cpu);
1299 
1300     trace_block_getrq(q, bio, bio->bi_opf);
1301     blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
1302     rq = __blk_mq_alloc_request(data, bio->bi_opf);
1303 
1304     data->hctx->queued++;
1305     return rq;
1306 }
1307 
1308 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1309 {
1310     int ret;
1311     struct request_queue *q = rq->q;
1312     struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
1313     struct blk_mq_queue_data bd = {
1314         .rq = rq,
1315         .list = NULL,
1316         .last = 1
1317     };
1318     blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
1319 
1320     if (blk_mq_hctx_stopped(hctx))
1321         goto insert;
1322 
1323     /*
1324      * If the driver returns OK, we are done. On error, kill the
1325      * request. Anything else (busy) just gets added back to our list,
1326      * as we previously would have done.
1327      */
1328     ret = q->mq_ops->queue_rq(hctx, &bd);
1329     if (ret == BLK_MQ_RQ_QUEUE_OK) {
1330         *cookie = new_cookie;
1331         return;
1332     }
1333 
1334     __blk_mq_requeue_request(rq);
1335 
1336     if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1337         *cookie = BLK_QC_T_NONE;
1338         rq->errors = -EIO;
1339         blk_mq_end_request(rq, rq->errors);
1340         return;
1341     }
1342 
1343 insert:
1344     blk_mq_insert_request(rq, false, true, true);
1345 }
1346 
1347 /*
1348  * Multiple hardware queue variant. This will not use per-process plugs,
1349  * but will attempt to bypass the hctx queueing if we can go straight to
1350  * hardware for SYNC IO.
1351  */
1352 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1353 {
1354     const int is_sync = op_is_sync(bio->bi_opf);
1355     const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1356     struct blk_mq_alloc_data data;
1357     struct request *rq;
1358     unsigned int request_count = 0, srcu_idx;
1359     struct blk_plug *plug;
1360     struct request *same_queue_rq = NULL;
1361     blk_qc_t cookie;
1362     unsigned int wb_acct;
1363 
1364     blk_queue_bounce(q, &bio);
1365 
1366     if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1367         bio_io_error(bio);
1368         return BLK_QC_T_NONE;
1369     }
1370 
1371     blk_queue_split(q, &bio, q->bio_split);
1372 
1373     if (!is_flush_fua && !blk_queue_nomerges(q) &&
1374         blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1375         return BLK_QC_T_NONE;
1376 
1377     wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1378 
1379     rq = blk_mq_map_request(q, bio, &data);
1380     if (unlikely(!rq)) {
1381         __wbt_done(q->rq_wb, wb_acct);
1382         return BLK_QC_T_NONE;
1383     }
1384 
1385     wbt_track(&rq->issue_stat, wb_acct);
1386 
1387     cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1388 
1389     if (unlikely(is_flush_fua)) {
1390         blk_mq_bio_to_request(rq, bio);
1391         blk_insert_flush(rq);
1392         goto run_queue;
1393     }
1394 
1395     plug = current->plug;
1396     /*
1397      * If the driver supports deferred issue based on 'last', then
1398      * queue it up like normal since we can potentially save some
1399      * CPU this way.
1400      */
1401     if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1402         !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1403         struct request *old_rq = NULL;
1404 
1405         blk_mq_bio_to_request(rq, bio);
1406 
1407         /*
1408          * We do limited plugging. If the bio can be merged, do that.
1409          * Otherwise the existing request in the plug list will be
1410          * issued. So the plug list will have one request at most
1411          */
1412         if (plug) {
1413             /*
1414              * The plug list might get flushed before this. If that
1415              * happens, same_queue_rq is invalid and plug list is
1416              * empty
1417              */
1418             if (same_queue_rq && !list_empty(&plug->mq_list)) {
1419                 old_rq = same_queue_rq;
1420                 list_del_init(&old_rq->queuelist);
1421             }
1422             list_add_tail(&rq->queuelist, &plug->mq_list);
1423         } else /* is_sync */
1424             old_rq = rq;
1425         blk_mq_put_ctx(data.ctx);
1426         if (!old_rq)
1427             goto done;
1428 
1429         if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1430             rcu_read_lock();
1431             blk_mq_try_issue_directly(old_rq, &cookie);
1432             rcu_read_unlock();
1433         } else {
1434             srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1435             blk_mq_try_issue_directly(old_rq, &cookie);
1436             srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1437         }
1438         goto done;
1439     }
1440 
1441     if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1442         /*
1443          * For a SYNC request, send it to the hardware immediately. For
1444          * an ASYNC request, just ensure that we run it later on. The
1445          * latter allows for merging opportunities and more efficient
1446          * dispatching.
1447          */
1448 run_queue:
1449         blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1450     }
1451     blk_mq_put_ctx(data.ctx);
1452 done:
1453     return cookie;
1454 }
1455 
1456 /*
1457  * Single hardware queue variant. This will attempt to use any per-process
1458  * plug for merging and IO deferral.
1459  */
1460 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1461 {
1462     const int is_sync = op_is_sync(bio->bi_opf);
1463     const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1464     struct blk_plug *plug;
1465     unsigned int request_count = 0;
1466     struct blk_mq_alloc_data data;
1467     struct request *rq;
1468     blk_qc_t cookie;
1469     unsigned int wb_acct;
1470 
1471     blk_queue_bounce(q, &bio);
1472 
1473     if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1474         bio_io_error(bio);
1475         return BLK_QC_T_NONE;
1476     }
1477 
1478     blk_queue_split(q, &bio, q->bio_split);
1479 
1480     if (!is_flush_fua && !blk_queue_nomerges(q)) {
1481         if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1482             return BLK_QC_T_NONE;
1483     } else
1484         request_count = blk_plug_queued_count(q);
1485 
1486     wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1487 
1488     rq = blk_mq_map_request(q, bio, &data);
1489     if (unlikely(!rq)) {
1490         __wbt_done(q->rq_wb, wb_acct);
1491         return BLK_QC_T_NONE;
1492     }
1493 
1494     wbt_track(&rq->issue_stat, wb_acct);
1495 
1496     cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1497 
1498     if (unlikely(is_flush_fua)) {
1499         blk_mq_bio_to_request(rq, bio);
1500         blk_insert_flush(rq);
1501         goto run_queue;
1502     }
1503 
1504     /*
1505      * If a task plug exists, utilize it: since this is completely
1506      * lockless, we can temporarily store requests there until the task
1507      * is either done or scheduled away.
1508      */
1509     plug = current->plug;
1510     if (plug) {
1511         struct request *last = NULL;
1512 
1513         blk_mq_bio_to_request(rq, bio);
1514 
1515         /*
1516          * @request_count may become stale because of schedule
1517          * out, so check the list again.
1518          */
1519         if (list_empty(&plug->mq_list))
1520             request_count = 0;
1521         if (!request_count)
1522             trace_block_plug(q);
1523         else
1524             last = list_entry_rq(plug->mq_list.prev);
1525 
1526         blk_mq_put_ctx(data.ctx);
1527 
1528         if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1529             blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1530             blk_flush_plug_list(plug, false);
1531             trace_block_plug(q);
1532         }
1533 
1534         list_add_tail(&rq->queuelist, &plug->mq_list);
1535         return cookie;
1536     }
1537 
1538     if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1539         /*
1540          * For a SYNC request, send it to the hardware immediately. For
1541          * an ASYNC request, just ensure that we run it later on. The
1542          * latter allows for merging opportunities and more efficient
1543          * dispatching.
1544          */
1545 run_queue:
1546         blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1547     }
1548 
1549     blk_mq_put_ctx(data.ctx);
1550     return cookie;
1551 }
1552 
1553 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1554         struct blk_mq_tags *tags, unsigned int hctx_idx)
1555 {
1556     struct page *page;
1557 
1558     if (tags->rqs && set->ops->exit_request) {
1559         int i;
1560 
1561         for (i = 0; i < tags->nr_tags; i++) {
1562             if (!tags->rqs[i])
1563                 continue;
1564             set->ops->exit_request(set->driver_data, tags->rqs[i],
1565                         hctx_idx, i);
1566             tags->rqs[i] = NULL;
1567         }
1568     }
1569 
1570     while (!list_empty(&tags->page_list)) {
1571         page = list_first_entry(&tags->page_list, struct page, lru);
1572         list_del_init(&page->lru);
1573         /*
1574          * Remove kmemleak object previously allocated in
1575          * blk_mq_init_rq_map().
1576          */
1577         kmemleak_free(page_address(page));
1578         __free_pages(page, page->private);
1579     }
1580 
1581     kfree(tags->rqs);
1582 
1583     blk_mq_free_tags(tags);
1584 }
1585 
1586 static size_t order_to_size(unsigned int order)
1587 {
1588     return (size_t)PAGE_SIZE << order;
1589 }
1590 
1591 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1592         unsigned int hctx_idx)
1593 {
1594     struct blk_mq_tags *tags;
1595     unsigned int i, j, entries_per_page, max_order = 4;
1596     size_t rq_size, left;
1597 
1598     tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1599                 set->numa_node,
1600                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1601     if (!tags)
1602         return NULL;
1603 
1604     INIT_LIST_HEAD(&tags->page_list);
1605 
1606     tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1607                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1608                  set->numa_node);
1609     if (!tags->rqs) {
1610         blk_mq_free_tags(tags);
1611         return NULL;
1612     }
1613 
1614     /*
1615      * rq_size is the size of the request plus driver payload, rounded
1616      * to the cacheline size
1617      */
1618     rq_size = round_up(sizeof(struct request) + set->cmd_size,
1619                 cache_line_size());
1620     left = rq_size * set->queue_depth;
1621 
1622     for (i = 0; i < set->queue_depth; ) {
1623         int this_order = max_order;
1624         struct page *page;
1625         int to_do;
1626         void *p;
1627 
1628         while (this_order && left < order_to_size(this_order - 1))
1629             this_order--;
1630 
1631         do {
1632             page = alloc_pages_node(set->numa_node,
1633                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1634                 this_order);
1635             if (page)
1636                 break;
1637             if (!this_order--)
1638                 break;
1639             if (order_to_size(this_order) < rq_size)
1640                 break;
1641         } while (1);
1642 
1643         if (!page)
1644             goto fail;
1645 
1646         page->private = this_order;
1647         list_add_tail(&page->lru, &tags->page_list);
1648 
1649         p = page_address(page);
1650         /*
1651          * Allow kmemleak to scan these pages as they contain pointers
1652          * to additional allocations like via ops->init_request().
1653          */
1654         kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1655         entries_per_page = order_to_size(this_order) / rq_size;
1656         to_do = min(entries_per_page, set->queue_depth - i);
1657         left -= to_do * rq_size;
1658         for (j = 0; j < to_do; j++) {
1659             tags->rqs[i] = p;
1660             if (set->ops->init_request) {
1661                 if (set->ops->init_request(set->driver_data,
1662                         tags->rqs[i], hctx_idx, i,
1663                         set->numa_node)) {
1664                     tags->rqs[i] = NULL;
1665                     goto fail;
1666                 }
1667             }
1668 
1669             p += rq_size;
1670             i++;
1671         }
1672     }
1673     return tags;
1674 
1675 fail:
1676     blk_mq_free_rq_map(set, tags, hctx_idx);
1677     return NULL;
1678 }
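
/*
 * Illustrative sketch: the allocation above is driven by the values a
 * driver fills into its tag set before calling blk_mq_alloc_tag_set();
 * cmd_size is the per-request driver payload that gets rounded into
 * rq_size.  The function name, depth and sizes are hypothetical.
 */
static int example_setup_tag_set(struct blk_mq_tag_set *set,
				 struct blk_mq_ops *ops)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = 64;			/* per-request driver payload */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(set);
}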
1679 
1680 /*
1681  * 'cpu' is going away. Splice any existing rq_list entries from this
1682  * software queue to the hw queue dispatch list, and ensure that it
1683  * gets run.
1684  */
1685 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1686 {
1687     struct blk_mq_hw_ctx *hctx;
1688     struct blk_mq_ctx *ctx;
1689     LIST_HEAD(tmp);
1690 
1691     hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1692     ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1693 
1694     spin_lock(&ctx->lock);
1695     if (!list_empty(&ctx->rq_list)) {
1696         list_splice_init(&ctx->rq_list, &tmp);
1697         blk_mq_hctx_clear_pending(hctx, ctx);
1698     }
1699     spin_unlock(&ctx->lock);
1700 
1701     if (list_empty(&tmp))
1702         return 0;
1703 
1704     spin_lock(&hctx->lock);
1705     list_splice_tail_init(&tmp, &hctx->dispatch);
1706     spin_unlock(&hctx->lock);
1707 
1708     blk_mq_run_hw_queue(hctx, true);
1709     return 0;
1710 }
1711 
1712 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1713 {
1714     cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1715                         &hctx->cpuhp_dead);
1716 }
1717 
1718 /* hctx->ctxs will be freed in queue's release handler */
1719 static void blk_mq_exit_hctx(struct request_queue *q,
1720         struct blk_mq_tag_set *set,
1721         struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1722 {
1723     unsigned flush_start_tag = set->queue_depth;
1724 
1725     blk_mq_tag_idle(hctx);
1726 
1727     if (set->ops->exit_request)
1728         set->ops->exit_request(set->driver_data,
1729                        hctx->fq->flush_rq, hctx_idx,
1730                        flush_start_tag + hctx_idx);
1731 
1732     if (set->ops->exit_hctx)
1733         set->ops->exit_hctx(hctx, hctx_idx);
1734 
1735     if (hctx->flags & BLK_MQ_F_BLOCKING)
1736         cleanup_srcu_struct(&hctx->queue_rq_srcu);
1737 
1738     blk_mq_remove_cpuhp(hctx);
1739     blk_free_flush_queue(hctx->fq);
1740     sbitmap_free(&hctx->ctx_map);
1741 }
1742 
1743 static void blk_mq_exit_hw_queues(struct request_queue *q,
1744         struct blk_mq_tag_set *set, int nr_queue)
1745 {
1746     struct blk_mq_hw_ctx *hctx;
1747     unsigned int i;
1748 
1749     queue_for_each_hw_ctx(q, hctx, i) {
1750         if (i == nr_queue)
1751             break;
1752         blk_mq_exit_hctx(q, set, hctx, i);
1753     }
1754 }
1755 
1756 static void blk_mq_free_hw_queues(struct request_queue *q,
1757         struct blk_mq_tag_set *set)
1758 {
1759     struct blk_mq_hw_ctx *hctx;
1760     unsigned int i;
1761 
1762     queue_for_each_hw_ctx(q, hctx, i)
1763         free_cpumask_var(hctx->cpumask);
1764 }
1765 
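/*
 * Set up a single hardware queue context: wire up the run/delay work items,
 * the dispatch list and the per-hctx state, register the CPU hotplug
 * notifier, allocate the ctx map and the flush queue, and give the driver a
 * chance to initialize its own per-hctx and flush-request data.
 */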
1766 static int blk_mq_init_hctx(struct request_queue *q,
1767         struct blk_mq_tag_set *set,
1768         struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1769 {
1770     int node;
1771     unsigned flush_start_tag = set->queue_depth;
1772 
1773     node = hctx->numa_node;
1774     if (node == NUMA_NO_NODE)
1775         node = hctx->numa_node = set->numa_node;
1776 
1777     INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1778     INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1779     spin_lock_init(&hctx->lock);
1780     INIT_LIST_HEAD(&hctx->dispatch);
1781     hctx->queue = q;
1782     hctx->queue_num = hctx_idx;
1783     hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1784 
1785     cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1786 
1787     hctx->tags = set->tags[hctx_idx];
1788 
1789     /*
1790      * Allocate space for all possible cpus to avoid allocation at
1791      * runtime
1792      */
1793     hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1794                     GFP_KERNEL, node);
1795     if (!hctx->ctxs)
1796         goto unregister_cpu_notifier;
1797 
1798     if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1799                   node))
1800         goto free_ctxs;
1801 
1802     hctx->nr_ctx = 0;
1803 
1804     if (set->ops->init_hctx &&
1805         set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1806         goto free_bitmap;
1807 
1808     hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1809     if (!hctx->fq)
1810         goto exit_hctx;
1811 
1812     if (set->ops->init_request &&
1813         set->ops->init_request(set->driver_data,
1814                    hctx->fq->flush_rq, hctx_idx,
1815                    flush_start_tag + hctx_idx, node))
1816         goto free_fq;
1817 
1818     if (hctx->flags & BLK_MQ_F_BLOCKING)
1819         init_srcu_struct(&hctx->queue_rq_srcu);
1820 
1821     return 0;
1822 
1823  free_fq:
1824     kfree(hctx->fq);
1825  exit_hctx:
1826     if (set->ops->exit_hctx)
1827         set->ops->exit_hctx(hctx, hctx_idx);
1828  free_bitmap:
1829     sbitmap_free(&hctx->ctx_map);
1830  free_ctxs:
1831     kfree(hctx->ctxs);
1832  unregister_cpu_notifier:
1833     blk_mq_remove_cpuhp(hctx);
1834     return -1;
1835 }
1836 
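/*
 * Initialize the per-cpu software queues (blk_mq_ctx) for every possible
 * CPU, and pick a local NUMA node for the hardware queue an online CPU maps
 * to when the device has more than one hardware queue.
 */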
1837 static void blk_mq_init_cpu_queues(struct request_queue *q,
1838                    unsigned int nr_hw_queues)
1839 {
1840     unsigned int i;
1841 
1842     for_each_possible_cpu(i) {
1843         struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1844         struct blk_mq_hw_ctx *hctx;
1845 
1846         memset(__ctx, 0, sizeof(*__ctx));
1847         __ctx->cpu = i;
1848         spin_lock_init(&__ctx->lock);
1849         INIT_LIST_HEAD(&__ctx->rq_list);
1850         __ctx->queue = q;
1851         blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
1852         blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
1853 
1854         /* If the cpu isn't online, it is mapped to the first hctx */
1855         if (!cpu_online(i))
1856             continue;
1857 
1858         hctx = blk_mq_map_queue(q, i);
1859 
1860         /*
1861          * Set local node, IFF we have more than one hw queue. If
1862          * not, we remain on the home node of the device
1863          */
1864         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1865             hctx->numa_node = local_memory_node(cpu_to_node(i));
1866     }
1867 }
1868 
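/*
 * (Re)build the mapping from software queues to hardware queues for the
 * given set of online CPUs, allocating request maps for newly used hardware
 * queues and releasing the request maps of hardware queues that end up with
 * no software queues mapped to them.
 */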
1869 static void blk_mq_map_swqueue(struct request_queue *q,
1870                    const struct cpumask *online_mask)
1871 {
1872     unsigned int i, hctx_idx;
1873     struct blk_mq_hw_ctx *hctx;
1874     struct blk_mq_ctx *ctx;
1875     struct blk_mq_tag_set *set = q->tag_set;
1876 
1877     /*
1878      * Avoid others reading an incomplete hctx->cpumask through sysfs
1879      */
1880     mutex_lock(&q->sysfs_lock);
1881 
1882     queue_for_each_hw_ctx(q, hctx, i) {
1883         cpumask_clear(hctx->cpumask);
1884         hctx->nr_ctx = 0;
1885     }
1886 
1887     /*
1888      * Map software to hardware queues
1889      */
1890     for_each_possible_cpu(i) {
1891         /* If the cpu isn't online, it is mapped to the first hctx */
1892         if (!cpumask_test_cpu(i, online_mask))
1893             continue;
1894 
1895         hctx_idx = q->mq_map[i];
1896         /* an unmapped hw queue can be remapped after the CPU topology changes */
1897         if (!set->tags[hctx_idx]) {
1898             set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
1899 
1900             /*
1901              * If tags initialization fails for some hctx,
1902              * that hctx won't be brought online.  In this
1903              * case, remap the current ctx to hctx[0] which
1904              * is guaranteed to always have tags allocated
1905              */
1906             if (!set->tags[hctx_idx])
1907                 q->mq_map[i] = 0;
1908         }
1909 
1910         ctx = per_cpu_ptr(q->queue_ctx, i);
1911         hctx = blk_mq_map_queue(q, i);
1912 
1913         cpumask_set_cpu(i, hctx->cpumask);
1914         ctx->index_hw = hctx->nr_ctx;
1915         hctx->ctxs[hctx->nr_ctx++] = ctx;
1916     }
1917 
1918     mutex_unlock(&q->sysfs_lock);
1919 
1920     queue_for_each_hw_ctx(q, hctx, i) {
1921         /*
1922          * If no software queues are mapped to this hardware queue,
1923          * disable it and free the request entries.
1924          */
1925         if (!hctx->nr_ctx) {
1926             /* Never unmap queue 0.  We need it as a
1927              * fallback in case a new remapping fails to
1928              * allocate tags.
1929              */
1930             if (i && set->tags[i]) {
1931                 blk_mq_free_rq_map(set, set->tags[i], i);
1932                 set->tags[i] = NULL;
1933             }
1934             hctx->tags = NULL;
1935             continue;
1936         }
1937 
1938         hctx->tags = set->tags[i];
1939         WARN_ON(!hctx->tags);
1940 
1941         /*
1942          * Set the map size to the number of mapped software queues.
1943          * This is more accurate and more efficient than looping
1944          * over all possibly mapped software queues.
1945          */
1946         sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
1947 
1948         /*
1949          * Initialize batch roundrobin counts
1950          */
1951         hctx->next_cpu = cpumask_first(hctx->cpumask);
1952         hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1953     }
1954 }
1955 
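/*
 * BLK_MQ_F_TAG_SHARED is set on every hardware queue of every request queue
 * attached to this tag set once a second queue is added, and cleared again
 * when only one queue remains, so tag allocation can account for the
 * sharing.
 */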
1956 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1957 {
1958     struct blk_mq_hw_ctx *hctx;
1959     int i;
1960 
1961     queue_for_each_hw_ctx(q, hctx, i) {
1962         if (shared)
1963             hctx->flags |= BLK_MQ_F_TAG_SHARED;
1964         else
1965             hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1966     }
1967 }
1968 
1969 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
1970 {
1971     struct request_queue *q;
1972 
1973     list_for_each_entry(q, &set->tag_list, tag_set_list) {
1974         blk_mq_freeze_queue(q);
1975         queue_set_hctx_shared(q, shared);
1976         blk_mq_unfreeze_queue(q);
1977     }
1978 }
1979 
1980 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1981 {
1982     struct blk_mq_tag_set *set = q->tag_set;
1983 
1984     mutex_lock(&set->tag_list_lock);
1985     list_del_init(&q->tag_set_list);
1986     if (list_is_singular(&set->tag_list)) {
1987         /* just transitioned to unshared */
1988         set->flags &= ~BLK_MQ_F_TAG_SHARED;
1989         /* update existing queue */
1990         blk_mq_update_tag_set_depth(set, false);
1991     }
1992     mutex_unlock(&set->tag_list_lock);
1993 }
1994 
1995 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1996                      struct request_queue *q)
1997 {
1998     q->tag_set = set;
1999 
2000     mutex_lock(&set->tag_list_lock);
2001 
2002     /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2003     if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2004         set->flags |= BLK_MQ_F_TAG_SHARED;
2005         /* update existing queue */
2006         blk_mq_update_tag_set_depth(set, true);
2007     }
2008     if (set->flags & BLK_MQ_F_TAG_SHARED)
2009         queue_set_hctx_shared(q, true);
2010     list_add_tail(&q->tag_set_list, &set->tag_list);
2011 
2012     mutex_unlock(&set->tag_list_lock);
2013 }
2014 
2015 /*
2016  * This is the actual release handler for mq, but we call it from the
2017  * request queue's release handler to avoid use-after-free issues and
2018  * other headaches: q->mq_kobj shouldn't have been introduced, but we
2019  * can't group the ctx/hctx kobjects without it.
2020  */
2021 void blk_mq_release(struct request_queue *q)
2022 {
2023     struct blk_mq_hw_ctx *hctx;
2024     unsigned int i;
2025 
2026     /* hctx kobj stays in hctx */
2027     queue_for_each_hw_ctx(q, hctx, i) {
2028         if (!hctx)
2029             continue;
2030         kfree(hctx->ctxs);
2031         kfree(hctx);
2032     }
2033 
2034     q->mq_map = NULL;
2035 
2036     kfree(q->queue_hw_ctx);
2037 
2038     /* ctx kobj stays in queue_ctx */
2039     free_percpu(q->queue_ctx);
2040 }
2041 
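/*
 * Allocate a request_queue on the tag set's NUMA node and initialize it for
 * blk-mq use.  On failure the partially set up queue is cleaned up and an
 * ERR_PTR is returned.
 */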
2042 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2043 {
2044     struct request_queue *uninit_q, *q;
2045 
2046     uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2047     if (!uninit_q)
2048         return ERR_PTR(-ENOMEM);
2049 
2050     q = blk_mq_init_allocated_queue(set, uninit_q);
2051     if (IS_ERR(q))
2052         blk_cleanup_queue(uninit_q);
2053 
2054     return q;
2055 }
2056 EXPORT_SYMBOL(blk_mq_init_queue);
2057 
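/*
 * Allocate and initialize any hardware queue contexts that are missing up to
 * set->nr_hw_queues, and tear down the ones beyond that count.  Used both
 * when a queue is first set up and when the number of hardware queues
 * changes.
 */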
2058 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2059                         struct request_queue *q)
2060 {
2061     int i, j;
2062     struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2063 
2064     blk_mq_sysfs_unregister(q);
2065     for (i = 0; i < set->nr_hw_queues; i++) {
2066         int node;
2067 
2068         if (hctxs[i])
2069             continue;
2070 
2071         node = blk_mq_hw_queue_to_node(q->mq_map, i);
2072         hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2073                     GFP_KERNEL, node);
2074         if (!hctxs[i])
2075             break;
2076 
2077         if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2078                         node)) {
2079             kfree(hctxs[i]);
2080             hctxs[i] = NULL;
2081             break;
2082         }
2083 
2084         atomic_set(&hctxs[i]->nr_active, 0);
2085         hctxs[i]->numa_node = node;
2086         hctxs[i]->queue_num = i;
2087 
2088         if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2089             free_cpumask_var(hctxs[i]->cpumask);
2090             kfree(hctxs[i]);
2091             hctxs[i] = NULL;
2092             break;
2093         }
2094         blk_mq_hctx_kobj_init(hctxs[i]);
2095     }
2096     for (j = i; j < q->nr_hw_queues; j++) {
2097         struct blk_mq_hw_ctx *hctx = hctxs[j];
2098 
2099         if (hctx) {
2100             if (hctx->tags) {
2101                 blk_mq_free_rq_map(set, hctx->tags, j);
2102                 set->tags[j] = NULL;
2103             }
2104             blk_mq_exit_hctx(q, set, hctx, j);
2105             free_cpumask_var(hctx->cpumask);
2106             kobject_put(&hctx->kobj);
2107             kfree(hctx->ctxs);
2108             kfree(hctx);
2109             hctxs[j] = NULL;
2110 
2111         }
2112     }
2113     q->nr_hw_queues = i;
2114     blk_mq_sysfs_register(q);
2115 }
2116 
2117 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2118                           struct request_queue *q)
2119 {
2120     /* mark the queue as mq asap */
2121     q->mq_ops = set->ops;
2122 
2123     q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2124     if (!q->queue_ctx)
2125         goto err_exit;
2126 
2127     q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2128                         GFP_KERNEL, set->numa_node);
2129     if (!q->queue_hw_ctx)
2130         goto err_percpu;
2131 
2132     q->mq_map = set->mq_map;
2133 
2134     blk_mq_realloc_hw_ctxs(set, q);
2135     if (!q->nr_hw_queues)
2136         goto err_hctxs;
2137 
2138     INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2139     blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2140 
2141     q->nr_queues = nr_cpu_ids;
2142 
2143     q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2144 
2145     if (!(set->flags & BLK_MQ_F_SG_MERGE))
2146         q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2147 
2148     q->sg_reserved_size = INT_MAX;
2149 
2150     INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2151     INIT_LIST_HEAD(&q->requeue_list);
2152     spin_lock_init(&q->requeue_lock);
2153 
2154     if (q->nr_hw_queues > 1)
2155         blk_queue_make_request(q, blk_mq_make_request);
2156     else
2157         blk_queue_make_request(q, blk_sq_make_request);
2158 
2159     /*
2160      * Do this after blk_queue_make_request() overrides it...
2161      */
2162     q->nr_requests = set->queue_depth;
2163 
2164     /*
2165      * Default to classic polling
2166      */
2167     q->poll_nsec = -1;
2168 
2169     if (set->ops->complete)
2170         blk_queue_softirq_done(q, set->ops->complete);
2171 
2172     blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2173 
2174     get_online_cpus();
2175     mutex_lock(&all_q_mutex);
2176 
2177     list_add_tail(&q->all_q_node, &all_q_list);
2178     blk_mq_add_queue_tag_set(set, q);
2179     blk_mq_map_swqueue(q, cpu_online_mask);
2180 
2181     mutex_unlock(&all_q_mutex);
2182     put_online_cpus();
2183 
2184     return q;
2185 
2186 err_hctxs:
2187     kfree(q->queue_hw_ctx);
2188 err_percpu:
2189     free_percpu(q->queue_ctx);
2190 err_exit:
2191     q->mq_ops = NULL;
2192     return ERR_PTR(-ENOMEM);
2193 }
2194 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2195 
2196 void blk_mq_free_queue(struct request_queue *q)
2197 {
2198     struct blk_mq_tag_set   *set = q->tag_set;
2199 
2200     mutex_lock(&all_q_mutex);
2201     list_del_init(&q->all_q_node);
2202     mutex_unlock(&all_q_mutex);
2203 
2204     wbt_exit(q);
2205 
2206     blk_mq_del_queue_tag_set(q);
2207 
2208     blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2209     blk_mq_free_hw_queues(q, set);
2210 }
2211 
2212 /* Basically redo blk_mq_init_queue with queue frozen */
2213 static void blk_mq_queue_reinit(struct request_queue *q,
2214                 const struct cpumask *online_mask)
2215 {
2216     WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2217 
2218     blk_mq_sysfs_unregister(q);
2219 
2220     /*
2221      * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2222      * we should change hctx numa_node according to new topology (this
2223  * involves freeing and re-allocating memory; is it worth doing?)
2224      */
2225 
2226     blk_mq_map_swqueue(q, online_mask);
2227 
2228     blk_mq_sysfs_register(q);
2229 }
2230 
2231 /*
2232  * New online cpumask which is going to be set in this hotplug event.
2233  * Declare this cpumask as global because cpu-hotplug operations are invoked
2234  * one by one and dynamically allocating it could result in a failure.
2235  */
2236 static struct cpumask cpuhp_online_new;
2237 
2238 static void blk_mq_queue_reinit_work(void)
2239 {
2240     struct request_queue *q;
2241 
2242     mutex_lock(&all_q_mutex);
2243     /*
2244      * We need to freeze and reinit all existing queues.  Freezing
2245      * involves synchronous wait for an RCU grace period and doing it
2246      * one by one may take a long time.  Start freezing all queues in
2247      * one swoop and then wait for the completions so that freezing can
2248      * take place in parallel.
2249      */
2250     list_for_each_entry(q, &all_q_list, all_q_node)
2251         blk_mq_freeze_queue_start(q);
2252     list_for_each_entry(q, &all_q_list, all_q_node)
2253         blk_mq_freeze_queue_wait(q);
2254 
2255     list_for_each_entry(q, &all_q_list, all_q_node)
2256         blk_mq_queue_reinit(q, &cpuhp_online_new);
2257 
2258     list_for_each_entry(q, &all_q_list, all_q_node)
2259         blk_mq_unfreeze_queue(q);
2260 
2261     mutex_unlock(&all_q_mutex);
2262 }
2263 
2264 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2265 {
2266     cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2267     blk_mq_queue_reinit_work();
2268     return 0;
2269 }
2270 
2271 /*
2272  * Before a hot-added cpu starts handling requests, new mappings must be
2273  * established.  Otherwise, requests in the hw queue might never be
2274  * dispatched.
2275  *
2276  * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2277  * for CPU0, and ctx1 for CPU1).
2278  *
2279  * Now CPU1 has just been onlined and a request is inserted into ctx1->rq_list,
2280  * which sets bit0 in the pending bitmap because ctx1->index_hw is still zero.
2281  *
2282  * And then while running hw queue, flush_busy_ctxs() finds bit0 is set in
2283  * pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
2284  * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
2285  * is ignored.
2286  */
2287 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2288 {
2289     cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2290     cpumask_set_cpu(cpu, &cpuhp_online_new);
2291     blk_mq_queue_reinit_work();
2292     return 0;
2293 }
2294 
2295 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2296 {
2297     int i;
2298 
2299     for (i = 0; i < set->nr_hw_queues; i++) {
2300         set->tags[i] = blk_mq_init_rq_map(set, i);
2301         if (!set->tags[i])
2302             goto out_unwind;
2303     }
2304 
2305     return 0;
2306 
2307 out_unwind:
2308     while (--i >= 0)
2309         blk_mq_free_rq_map(set, set->tags[i], i);
2310 
2311     return -ENOMEM;
2312 }
2313 
2314 /*
2315  * Allocate the request maps associated with this tag_set. Note that this
2316  * may reduce the depth asked for, if memory is tight. set->queue_depth
2317  * will be updated to reflect the allocated depth.
2318  */
2319 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2320 {
2321     unsigned int depth;
2322     int err;
2323 
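    /*
     * Keep halving the depth on allocation failure, e.g. 256 -> 128 -> 64,
     * until the request maps fit or we drop below the minimum usable depth.
     */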
2324     depth = set->queue_depth;
2325     do {
2326         err = __blk_mq_alloc_rq_maps(set);
2327         if (!err)
2328             break;
2329 
2330         set->queue_depth >>= 1;
2331         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2332             err = -ENOMEM;
2333             break;
2334         }
2335     } while (set->queue_depth);
2336 
2337     if (!set->queue_depth || err) {
2338         pr_err("blk-mq: failed to allocate request map\n");
2339         return -ENOMEM;
2340     }
2341 
2342     if (depth != set->queue_depth)
2343         pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2344                         depth, set->queue_depth);
2345 
2346     return 0;
2347 }
2348 
2349 /*
2350  * Alloc a tag set to be associated with one or more request queues.
2351  * May fail with -EINVAL for various error conditions. May adjust the
2352  * requested depth down, if it is too large. In that case, the adjusted
2353  * value will be stored in set->queue_depth.
2354  */
2355 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2356 {
2357     int ret;
2358 
2359     BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2360 
2361     if (!set->nr_hw_queues)
2362         return -EINVAL;
2363     if (!set->queue_depth)
2364         return -EINVAL;
2365     if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2366         return -EINVAL;
2367 
2368     if (!set->ops->queue_rq)
2369         return -EINVAL;
2370 
2371     if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2372         pr_info("blk-mq: reduced tag depth to %u\n",
2373             BLK_MQ_MAX_DEPTH);
2374         set->queue_depth = BLK_MQ_MAX_DEPTH;
2375     }
2376 
2377     /*
2378      * If a crashdump is active, then we are potentially in a very
2379      * memory constrained environment. Limit us to 1 queue and
2380      * 64 tags to prevent using too much memory.
2381      */
2382     if (is_kdump_kernel()) {
2383         set->nr_hw_queues = 1;
2384         set->queue_depth = min(64U, set->queue_depth);
2385     }
2386     /*
2387      * There is no use for more h/w queues than cpus.
2388      */
2389     if (set->nr_hw_queues > nr_cpu_ids)
2390         set->nr_hw_queues = nr_cpu_ids;
2391 
2392     set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2393                  GFP_KERNEL, set->numa_node);
2394     if (!set->tags)
2395         return -ENOMEM;
2396 
2397     ret = -ENOMEM;
2398     set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2399             GFP_KERNEL, set->numa_node);
2400     if (!set->mq_map)
2401         goto out_free_tags;
2402 
2403     if (set->ops->map_queues)
2404         ret = set->ops->map_queues(set);
2405     else
2406         ret = blk_mq_map_queues(set);
2407     if (ret)
2408         goto out_free_mq_map;
2409 
2410     ret = blk_mq_alloc_rq_maps(set);
2411     if (ret)
2412         goto out_free_mq_map;
2413 
2414     mutex_init(&set->tag_list_lock);
2415     INIT_LIST_HEAD(&set->tag_list);
2416 
2417     return 0;
2418 
2419 out_free_mq_map:
2420     kfree(set->mq_map);
2421     set->mq_map = NULL;
2422 out_free_tags:
2423     kfree(set->tags);
2424     set->tags = NULL;
2425     return ret;
2426 }
2427 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
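
/*
 * Example usage (illustrative sketch, not a real driver): a block driver
 * typically embeds a blk_mq_tag_set, fills in its ops and sizing, allocates
 * the tag set and then creates a request queue from it:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 *
 * my_queue_rq and struct my_cmd are hypothetical names; see in-tree drivers
 * such as null_blk or virtio_blk for complete examples.
 */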
2428 
2429 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2430 {
2431     int i;
2432 
2433     for (i = 0; i < nr_cpu_ids; i++) {
2434         if (set->tags[i])
2435             blk_mq_free_rq_map(set, set->tags[i], i);
2436     }
2437 
2438     kfree(set->mq_map);
2439     set->mq_map = NULL;
2440 
2441     kfree(set->tags);
2442     set->tags = NULL;
2443 }
2444 EXPORT_SYMBOL(blk_mq_free_tag_set);
2445 
2446 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2447 {
2448     struct blk_mq_tag_set *set = q->tag_set;
2449     struct blk_mq_hw_ctx *hctx;
2450     int i, ret;
2451 
2452     if (!set || nr > set->queue_depth)
2453         return -EINVAL;
2454 
2455     ret = 0;
2456     queue_for_each_hw_ctx(q, hctx, i) {
2457         if (!hctx->tags)
2458             continue;
2459         ret = blk_mq_tag_update_depth(hctx->tags, nr);
2460         if (ret)
2461             break;
2462     }
2463 
2464     if (!ret)
2465         q->nr_requests = nr;
2466 
2467     return ret;
2468 }
2469 
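/*
 * Change the number of hardware queues for all request queues sharing this
 * tag set.  Every queue is frozen while the hardware contexts are
 * reallocated, the make_request function is re-selected, and the software
 * to hardware queue mapping is rebuilt.
 */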
2470 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2471 {
2472     struct request_queue *q;
2473 
2474     if (nr_hw_queues > nr_cpu_ids)
2475         nr_hw_queues = nr_cpu_ids;
2476     if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2477         return;
2478 
2479     list_for_each_entry(q, &set->tag_list, tag_set_list)
2480         blk_mq_freeze_queue(q);
2481 
2482     set->nr_hw_queues = nr_hw_queues;
2483     list_for_each_entry(q, &set->tag_list, tag_set_list) {
2484         blk_mq_realloc_hw_ctxs(set, q);
2485 
2486         if (q->nr_hw_queues > 1)
2487             blk_queue_make_request(q, blk_mq_make_request);
2488         else
2489             blk_queue_make_request(q, blk_sq_make_request);
2490 
2491         blk_mq_queue_reinit(q, cpu_online_mask);
2492     }
2493 
2494     list_for_each_entry(q, &set->tag_list, tag_set_list)
2495         blk_mq_unfreeze_queue(q);
2496 }
2497 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2498 
2499 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2500                        struct blk_mq_hw_ctx *hctx,
2501                        struct request *rq)
2502 {
2503     struct blk_rq_stat stat[2];
2504     unsigned long ret = 0;
2505 
2506     /*
2507      * If stats collection isn't on, don't sleep but turn it on for
2508      * future users
2509      */
2510     if (!blk_stat_enable(q))
2511         return 0;
2512 
2513     /*
2514      * We shouldn't have to do this once per IO; this should be optimized
2515      * to just use the current window of stats until it changes.
2516      */
2517     memset(&stat, 0, sizeof(stat));
2518     blk_hctx_stat_get(hctx, stat);
2519 
2520     /*
2521      * As an optimistic guess, use half of the mean service time
2522      * for this type of request. We can (and should) make this smarter.
2523      * For instance, if the completion latencies are tight, we can
2524      * get closer than just half the mean. This is especially
2525      * important on devices where the completion latencies are longer
2526      * than ~10 usec.
2527      */
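    /*
     * For example, with a mean read completion time of 8 usec the hybrid
     * poll would sleep for roughly 4 usec before switching to busy polling.
     */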
2528     if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2529         ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2530     else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2531         ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2532 
2533     return ret;
2534 }
2535 
2536 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2537                      struct blk_mq_hw_ctx *hctx,
2538                      struct request *rq)
2539 {
2540     struct hrtimer_sleeper hs;
2541     enum hrtimer_mode mode;
2542     unsigned int nsecs;
2543     ktime_t kt;
2544 
2545     if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2546         return false;
2547 
2548     /*
2549      * poll_nsec can be:
2550      *
2551      * -1:  don't ever hybrid sleep
2552      *  0:  use half of prev avg
2553      * >0:  use this specific value
2554      */
2555     if (q->poll_nsec == -1)
2556         return false;
2557     else if (q->poll_nsec > 0)
2558         nsecs = q->poll_nsec;
2559     else
2560         nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2561 
2562     if (!nsecs)
2563         return false;
2564 
2565     set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2566 
2567     /*
2568      * This will be replaced with the stats tracking code, using
2569      * 'avg_completion_time / 2' as the pre-sleep target.
2570      */
2571     kt = nsecs;
2572 
2573     mode = HRTIMER_MODE_REL;
2574     hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2575     hrtimer_set_expires(&hs.timer, kt);
2576 
2577     hrtimer_init_sleeper(&hs, current);
2578     do {
2579         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2580             break;
2581         set_current_state(TASK_UNINTERRUPTIBLE);
2582         hrtimer_start_expires(&hs.timer, mode);
2583         if (hs.task)
2584             io_schedule();
2585         hrtimer_cancel(&hs.timer);
2586         mode = HRTIMER_MODE_ABS;
2587     } while (hs.task && !signal_pending(current));
2588 
2589     __set_current_state(TASK_RUNNING);
2590     destroy_hrtimer_on_stack(&hs.timer);
2591     return true;
2592 }
2593 
2594 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2595 {
2596     struct request_queue *q = hctx->queue;
2597     long state;
2598 
2599     /*
2600      * If we sleep, have the caller restart the poll loop to reset
2601      * the state. Like for the other success return cases, the
2602      * caller is responsible for checking if the IO completed. If
2603      * the IO isn't complete, we'll get called again and will go
2604      * straight to the busy poll loop.
2605      */
2606     if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2607         return true;
2608 
2609     hctx->poll_considered++;
2610 
2611     state = current->state;
2612     while (!need_resched()) {
2613         int ret;
2614 
2615         hctx->poll_invoked++;
2616 
2617         ret = q->mq_ops->poll(hctx, rq->tag);
2618         if (ret > 0) {
2619             hctx->poll_success++;
2620             set_current_state(TASK_RUNNING);
2621             return true;
2622         }
2623 
2624         if (signal_pending_state(state, current))
2625             set_current_state(TASK_RUNNING);
2626 
2627         if (current->state == TASK_RUNNING)
2628             return true;
2629         if (ret < 0)
2630             break;
2631         cpu_relax();
2632     }
2633 
2634     return false;
2635 }
2636 
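/*
 * Poll for completion of the request identified by @cookie, which encodes
 * the hardware queue number and tag assigned at submission time.  Returns
 * false if polling is not supported or not enabled for this queue;
 * otherwise returns the result of the hybrid/busy poll loop, and the caller
 * must check whether the IO actually completed.
 */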
2637 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2638 {
2639     struct blk_mq_hw_ctx *hctx;
2640     struct blk_plug *plug;
2641     struct request *rq;
2642 
2643     if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2644         !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2645         return false;
2646 
2647     plug = current->plug;
2648     if (plug)
2649         blk_flush_plug_list(plug, false);
2650 
2651     hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2652     rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2653 
2654     return __blk_mq_poll(hctx, rq);
2655 }
2656 EXPORT_SYMBOL_GPL(blk_mq_poll);
2657 
2658 void blk_mq_disable_hotplug(void)
2659 {
2660     mutex_lock(&all_q_mutex);
2661 }
2662 
2663 void blk_mq_enable_hotplug(void)
2664 {
2665     mutex_unlock(&all_q_mutex);
2666 }
2667 
2668 static int __init blk_mq_init(void)
2669 {
2670     cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2671                 blk_mq_hctx_notify_dead);
2672 
2673     cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2674                   blk_mq_queue_reinit_prepare,
2675                   blk_mq_queue_reinit_dead);
2676     return 0;
2677 }
2678 subsys_initcall(blk_mq_init);