0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 1991, 1992 Linus Torvalds
0004  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
0005  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
0006  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
0007  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
0008  *  -  July 2000
0009  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
0010  */
0011 
0012 /*
0013  * This handles all read/write requests to block devices
0014  */
0015 #include <linux/kernel.h>
0016 #include <linux/module.h>
0017 #include <linux/bio.h>
0018 #include <linux/blkdev.h>
0019 #include <linux/blk-pm.h>
0020 #include <linux/blk-integrity.h>
0021 #include <linux/highmem.h>
0022 #include <linux/mm.h>
0023 #include <linux/pagemap.h>
0024 #include <linux/kernel_stat.h>
0025 #include <linux/string.h>
0026 #include <linux/init.h>
0027 #include <linux/completion.h>
0028 #include <linux/slab.h>
0029 #include <linux/swap.h>
0030 #include <linux/writeback.h>
0031 #include <linux/task_io_accounting_ops.h>
0032 #include <linux/fault-inject.h>
0033 #include <linux/list_sort.h>
0034 #include <linux/delay.h>
0035 #include <linux/ratelimit.h>
0036 #include <linux/pm_runtime.h>
0037 #include <linux/t10-pi.h>
0038 #include <linux/debugfs.h>
0039 #include <linux/bpf.h>
0040 #include <linux/psi.h>
0041 #include <linux/part_stat.h>
0042 #include <linux/sched/sysctl.h>
0043 #include <linux/blk-crypto.h>
0044 
0045 #define CREATE_TRACE_POINTS
0046 #include <trace/events/block.h>
0047 
0048 #include "blk.h"
0049 #include "blk-mq-sched.h"
0050 #include "blk-pm.h"
0051 #include "blk-cgroup.h"
0052 #include "blk-throttle.h"
0053 
0054 struct dentry *blk_debugfs_root;
0055 
0056 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
0057 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
0058 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
0059 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
0060 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
0061 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
0062 
0063 DEFINE_IDA(blk_queue_ida);
0064 
0065 /*
0066  * For queue allocation
0067  */
0068 struct kmem_cache *blk_requestq_cachep;
0069 struct kmem_cache *blk_requestq_srcu_cachep;
0070 
0071 /*
0072  * Controlling structure to kblockd
0073  */
0074 static struct workqueue_struct *kblockd_workqueue;
0075 
0076 /**
0077  * blk_queue_flag_set - atomically set a queue flag
0078  * @flag: flag to be set
0079  * @q: request queue
0080  */
0081 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
0082 {
0083     set_bit(flag, &q->queue_flags);
0084 }
0085 EXPORT_SYMBOL(blk_queue_flag_set);
0086 
0087 /**
0088  * blk_queue_flag_clear - atomically clear a queue flag
0089  * @flag: flag to be cleared
0090  * @q: request queue
0091  */
0092 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
0093 {
0094     clear_bit(flag, &q->queue_flags);
0095 }
0096 EXPORT_SYMBOL(blk_queue_flag_clear);
0097 
0098 /**
0099  * blk_queue_flag_test_and_set - atomically test and set a queue flag
0100  * @flag: flag to be set
0101  * @q: request queue
0102  *
0103  * Returns the previous value of @flag - 0 if the flag was not set and 1 if
0104  * the flag was already set.
0105  */
0106 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
0107 {
0108     return test_and_set_bit(flag, &q->queue_flags);
0109 }
0110 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
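
/*
 * Editorial example (not part of the original file): a minimal sketch of how
 * a driver might use the flag helpers above while configuring its queue.  The
 * flags shown are real queue flags, but whether a given driver should set
 * them depends entirely on the hardware; "q" is assumed to be a request_queue
 * owned by the caller.
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOWAIT, q))
 *		pr_debug("NOWAIT was not set before this call\n");
 */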
0111 
0112 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
0113 static const char *const blk_op_name[] = {
0114     REQ_OP_NAME(READ),
0115     REQ_OP_NAME(WRITE),
0116     REQ_OP_NAME(FLUSH),
0117     REQ_OP_NAME(DISCARD),
0118     REQ_OP_NAME(SECURE_ERASE),
0119     REQ_OP_NAME(ZONE_RESET),
0120     REQ_OP_NAME(ZONE_RESET_ALL),
0121     REQ_OP_NAME(ZONE_OPEN),
0122     REQ_OP_NAME(ZONE_CLOSE),
0123     REQ_OP_NAME(ZONE_FINISH),
0124     REQ_OP_NAME(ZONE_APPEND),
0125     REQ_OP_NAME(WRITE_ZEROES),
0126     REQ_OP_NAME(DRV_IN),
0127     REQ_OP_NAME(DRV_OUT),
0128 };
0129 #undef REQ_OP_NAME
0130 
0131 /**
0132  * blk_op_str - Return the string name XXX for REQ_OP_XXX.
0133  * @op: REQ_OP_XXX.
0134  *
0135  * Description: Centralized block layer helper to convert REQ_OP_XXX into
0136  * string format. Useful when debugging and tracing a bio or request. For an
0137  * invalid REQ_OP_XXX it returns the string "UNKNOWN".
0138  */
0139 inline const char *blk_op_str(enum req_op op)
0140 {
0141     const char *op_str = "UNKNOWN";
0142 
0143     if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
0144         op_str = blk_op_name[op];
0145 
0146     return op_str;
0147 }
0148 EXPORT_SYMBOL_GPL(blk_op_str);
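
/*
 * Editorial example (not part of the original file): blk_op_str() is mostly
 * useful in debug and trace output.  A sketch, assuming "rq" is a struct
 * request the caller already owns:
 *
 *	pr_debug("completing %s request at sector %llu\n",
 *		 blk_op_str(req_op(rq)),
 *		 (unsigned long long)blk_rq_pos(rq));
 */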
0149 
0150 static const struct {
0151     int     errno;
0152     const char  *name;
0153 } blk_errors[] = {
0154     [BLK_STS_OK]        = { 0,      "" },
0155     [BLK_STS_NOTSUPP]   = { -EOPNOTSUPP, "operation not supported" },
0156     [BLK_STS_TIMEOUT]   = { -ETIMEDOUT, "timeout" },
0157     [BLK_STS_NOSPC]     = { -ENOSPC,    "critical space allocation" },
0158     [BLK_STS_TRANSPORT] = { -ENOLINK,   "recoverable transport" },
0159     [BLK_STS_TARGET]    = { -EREMOTEIO, "critical target" },
0160     [BLK_STS_NEXUS]     = { -EBADE, "critical nexus" },
0161     [BLK_STS_MEDIUM]    = { -ENODATA,   "critical medium" },
0162     [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
0163     [BLK_STS_RESOURCE]  = { -ENOMEM,    "kernel resource" },
0164     [BLK_STS_DEV_RESOURCE]  = { -EBUSY, "device resource" },
0165     [BLK_STS_AGAIN]     = { -EAGAIN,    "nonblocking retry" },
0166     [BLK_STS_OFFLINE]   = { -ENODEV,    "device offline" },
0167 
0168     /* device mapper special case, should not leak out: */
0169     [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
0170 
0171     /* zone device specific errors */
0172     [BLK_STS_ZONE_OPEN_RESOURCE]    = { -ETOOMANYREFS, "open zones exceeded" },
0173     [BLK_STS_ZONE_ACTIVE_RESOURCE]  = { -EOVERFLOW, "active zones exceeded" },
0174 
0175     /* everything else not covered above: */
0176     [BLK_STS_IOERR]     = { -EIO,   "I/O" },
0177 };
0178 
0179 blk_status_t errno_to_blk_status(int errno)
0180 {
0181     int i;
0182 
0183     for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
0184         if (blk_errors[i].errno == errno)
0185             return (__force blk_status_t)i;
0186     }
0187 
0188     return BLK_STS_IOERR;
0189 }
0190 EXPORT_SYMBOL_GPL(errno_to_blk_status);
0191 
0192 int blk_status_to_errno(blk_status_t status)
0193 {
0194     int idx = (__force int)status;
0195 
0196     if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
0197         return -EIO;
0198     return blk_errors[idx].errno;
0199 }
0200 EXPORT_SYMBOL_GPL(blk_status_to_errno);
0201 
0202 const char *blk_status_to_str(blk_status_t status)
0203 {
0204     int idx = (__force int)status;
0205 
0206     if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
0207         return "<null>";
0208     return blk_errors[idx].name;
0209 }
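
/*
 * Editorial example (not part of the original file): the usual translation
 * pattern at the boundary between errno based code and the block layer.  A
 * sketch, where "err" is a hypothetical errno returned by a lower layer:
 *
 *	bio->bi_status = errno_to_blk_status(err);
 *	bio_endio(bio);
 *
 * and in the other direction, when a blk_status_t must be reported as an
 * errno:
 *
 *	return blk_status_to_errno(bio->bi_status);
 */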
0210 
0211 /**
0212  * blk_sync_queue - cancel any pending callbacks on a queue
0213  * @q: the queue
0214  *
0215  * Description:
0216  *     The block layer may perform asynchronous callback activity
0217  *     on a queue, such as calling the unplug function after a timeout.
0218  *     A block device may call blk_sync_queue to ensure that any
0219  *     such activity is cancelled, thus allowing it to release resources
0220  *     that the callbacks might use. The caller must already have made sure
0221  *     that its ->submit_bio will not re-add plugging prior to calling
0222  *     this function.
0223  *
0224  *     This function does not cancel any asynchronous activity arising
0225  *     out of elevator or throttling code. That would require elevator_exit()
0226  *     and blkcg_exit_queue() to be called with queue lock initialized.
0227  *
0228  */
0229 void blk_sync_queue(struct request_queue *q)
0230 {
0231     del_timer_sync(&q->timeout);
0232     cancel_work_sync(&q->timeout_work);
0233 }
0234 EXPORT_SYMBOL(blk_sync_queue);
0235 
0236 /**
0237  * blk_set_pm_only - increment pm_only counter
0238  * @q: request queue pointer
0239  */
0240 void blk_set_pm_only(struct request_queue *q)
0241 {
0242     atomic_inc(&q->pm_only);
0243 }
0244 EXPORT_SYMBOL_GPL(blk_set_pm_only);
0245 
0246 void blk_clear_pm_only(struct request_queue *q)
0247 {
0248     int pm_only;
0249 
0250     pm_only = atomic_dec_return(&q->pm_only);
0251     WARN_ON_ONCE(pm_only < 0);
0252     if (pm_only == 0)
0253         wake_up_all(&q->mq_freeze_wq);
0254 }
0255 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
0256 
0257 /**
0258  * blk_put_queue - decrement the request_queue refcount
0259  * @q: the request_queue structure to decrement the refcount for
0260  *
0261  * Decrements the refcount of the request_queue kobject. When this reaches 0
0262  * we'll have blk_release_queue() called.
0263  *
0264  * Context: Any context, but the last reference must not be dropped from
0265  *          atomic context.
0266  */
0267 void blk_put_queue(struct request_queue *q)
0268 {
0269     kobject_put(&q->kobj);
0270 }
0271 EXPORT_SYMBOL(blk_put_queue);
0272 
0273 void blk_queue_start_drain(struct request_queue *q)
0274 {
0275     /*
0276      * When the queue DYING flag is set, we need to block new requests
0277      * from entering the queue, so we call blk_freeze_queue_start() to
0278      * prevent I/O from crossing blk_queue_enter().
0279      */
0280     blk_freeze_queue_start(q);
0281     if (queue_is_mq(q))
0282         blk_mq_wake_waiters(q);
0283     /* Make blk_queue_enter() reexamine the DYING flag. */
0284     wake_up_all(&q->mq_freeze_wq);
0285 }
0286 
0287 /**
0288  * blk_queue_enter() - try to increase q->q_usage_counter
0289  * @q: request queue pointer
0290  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
0291  */
0292 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
0293 {
0294     const bool pm = flags & BLK_MQ_REQ_PM;
0295 
0296     while (!blk_try_enter_queue(q, pm)) {
0297         if (flags & BLK_MQ_REQ_NOWAIT)
0298             return -EAGAIN;
0299 
0300         /*
0301          * Read-side pairing of the barrier in blk_freeze_queue_start():
0302          * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
0303          * against reading .mq_freeze_depth or the queue dying flag,
0304          * otherwise the following wait may never return if the two
0305          * reads are reordered.
0306          */
0307         smp_rmb();
0308         wait_event(q->mq_freeze_wq,
0309                (!q->mq_freeze_depth &&
0310                 blk_pm_resume_queue(pm, q)) ||
0311                blk_queue_dying(q));
0312         if (blk_queue_dying(q))
0313             return -ENODEV;
0314     }
0315 
0316     return 0;
0317 }
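
/*
 * Editorial example (not part of the original file): blk_queue_enter() is
 * normally paired with blk_queue_exit() around work that must not race with
 * a queue freeze.  A sketch, assuming the caller already holds a reference
 * on "q":
 *
 *	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *	if (ret)
 *		return ret;
 *	... touch queue state or issue a passthrough request ...
 *	blk_queue_exit(q);
 */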
0318 
0319 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
0320 {
0321     while (!blk_try_enter_queue(q, false)) {
0322         struct gendisk *disk = bio->bi_bdev->bd_disk;
0323 
0324         if (bio->bi_opf & REQ_NOWAIT) {
0325             if (test_bit(GD_DEAD, &disk->state))
0326                 goto dead;
0327             bio_wouldblock_error(bio);
0328             return -EAGAIN;
0329         }
0330 
0331         /*
0332          * Read-side pairing of the barrier in blk_freeze_queue_start():
0333          * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
0334          * against reading .mq_freeze_depth or the queue dying flag,
0335          * otherwise the following wait may never return if the two
0336          * reads are reordered.
0337          */
0338         smp_rmb();
0339         wait_event(q->mq_freeze_wq,
0340                (!q->mq_freeze_depth &&
0341                 blk_pm_resume_queue(false, q)) ||
0342                test_bit(GD_DEAD, &disk->state));
0343         if (test_bit(GD_DEAD, &disk->state))
0344             goto dead;
0345     }
0346 
0347     return 0;
0348 dead:
0349     bio_io_error(bio);
0350     return -ENODEV;
0351 }
0352 
0353 void blk_queue_exit(struct request_queue *q)
0354 {
0355     percpu_ref_put(&q->q_usage_counter);
0356 }
0357 
0358 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
0359 {
0360     struct request_queue *q =
0361         container_of(ref, struct request_queue, q_usage_counter);
0362 
0363     wake_up_all(&q->mq_freeze_wq);
0364 }
0365 
0366 static void blk_rq_timed_out_timer(struct timer_list *t)
0367 {
0368     struct request_queue *q = from_timer(q, t, timeout);
0369 
0370     kblockd_schedule_work(&q->timeout_work);
0371 }
0372 
0373 static void blk_timeout_work(struct work_struct *work)
0374 {
0375 }
0376 
0377 struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
0378 {
0379     struct request_queue *q;
0380 
0381     q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
0382             GFP_KERNEL | __GFP_ZERO, node_id);
0383     if (!q)
0384         return NULL;
0385 
0386     if (alloc_srcu) {
0387         blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
0388         if (init_srcu_struct(q->srcu) != 0)
0389             goto fail_q;
0390     }
0391 
0392     q->last_merge = NULL;
0393 
0394     q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
0395     if (q->id < 0)
0396         goto fail_srcu;
0397 
0398     q->stats = blk_alloc_queue_stats();
0399     if (!q->stats)
0400         goto fail_id;
0401 
0402     q->node = node_id;
0403 
0404     atomic_set(&q->nr_active_requests_shared_tags, 0);
0405 
0406     timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
0407     INIT_WORK(&q->timeout_work, blk_timeout_work);
0408     INIT_LIST_HEAD(&q->icq_list);
0409 
0410     kobject_init(&q->kobj, &blk_queue_ktype);
0411 
0412     mutex_init(&q->debugfs_mutex);
0413     mutex_init(&q->sysfs_lock);
0414     mutex_init(&q->sysfs_dir_lock);
0415     spin_lock_init(&q->queue_lock);
0416 
0417     init_waitqueue_head(&q->mq_freeze_wq);
0418     mutex_init(&q->mq_freeze_lock);
0419 
0420     /*
0421      * Init percpu_ref in atomic mode so that it's faster to shutdown.
0422      * See blk_register_queue() for details.
0423      */
0424     if (percpu_ref_init(&q->q_usage_counter,
0425                 blk_queue_usage_counter_release,
0426                 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
0427         goto fail_stats;
0428 
0429     blk_queue_dma_alignment(q, 511);
0430     blk_set_default_limits(&q->limits);
0431     q->nr_requests = BLKDEV_DEFAULT_RQ;
0432 
0433     return q;
0434 
0435 fail_stats:
0436     blk_free_queue_stats(q->stats);
0437 fail_id:
0438     ida_free(&blk_queue_ida, q->id);
0439 fail_srcu:
0440     if (alloc_srcu)
0441         cleanup_srcu_struct(q->srcu);
0442 fail_q:
0443     kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
0444     return NULL;
0445 }
0446 
0447 /**
0448  * blk_get_queue - increment the request_queue refcount
0449  * @q: the request_queue structure to increment the refcount for
0450  *
0451  * Increment the refcount of the request_queue kobject.
0452  *
0453  * Context: Any context.
0454  */
0455 bool blk_get_queue(struct request_queue *q)
0456 {
0457     if (unlikely(blk_queue_dying(q)))
0458         return false;
0459     kobject_get(&q->kobj);
0460     return true;
0461 }
0462 EXPORT_SYMBOL(blk_get_queue);
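
/*
 * Editorial example (not part of the original file): blk_get_queue() and
 * blk_put_queue() pair like any other refcount, and blk_get_queue() can fail
 * if the queue is already dying.  A sketch:
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	... use q ...
 *	blk_put_queue(q);
 */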
0463 
0464 #ifdef CONFIG_FAIL_MAKE_REQUEST
0465 
0466 static DECLARE_FAULT_ATTR(fail_make_request);
0467 
0468 static int __init setup_fail_make_request(char *str)
0469 {
0470     return setup_fault_attr(&fail_make_request, str);
0471 }
0472 __setup("fail_make_request=", setup_fail_make_request);
0473 
0474 bool should_fail_request(struct block_device *part, unsigned int bytes)
0475 {
0476     return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
0477 }
0478 
0479 static int __init fail_make_request_debugfs(void)
0480 {
0481     struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
0482                         NULL, &fail_make_request);
0483 
0484     return PTR_ERR_OR_ZERO(dir);
0485 }
0486 
0487 late_initcall(fail_make_request_debugfs);
0488 #endif /* CONFIG_FAIL_MAKE_REQUEST */
0489 
0490 static inline bool bio_check_ro(struct bio *bio)
0491 {
0492     if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
0493         if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
0494             return false;
0495         pr_warn("Trying to write to read-only block-device %pg\n",
0496             bio->bi_bdev);
0497         /* Older lvm-tools actually trigger this */
0498         return false;
0499     }
0500 
0501     return false;
0502 }
0503 
0504 static noinline int should_fail_bio(struct bio *bio)
0505 {
0506     if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
0507         return -EIO;
0508     return 0;
0509 }
0510 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
0511 
0512 /*
0513  * Check whether this bio extends beyond the end of the device or partition.
0514  * This may well happen - the kernel calls bread() without checking the size of
0515  * the device, e.g., when mounting a file system.
0516  */
0517 static inline int bio_check_eod(struct bio *bio)
0518 {
0519     sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
0520     unsigned int nr_sectors = bio_sectors(bio);
0521 
0522     if (nr_sectors && maxsector &&
0523         (nr_sectors > maxsector ||
0524          bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
0525         pr_info_ratelimited("%s: attempt to access beyond end of device\n"
0526                     "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
0527                     current->comm, bio->bi_bdev, bio->bi_opf,
0528                     bio->bi_iter.bi_sector, nr_sectors, maxsector);
0529         return -EIO;
0530     }
0531     return 0;
0532 }
0533 
0534 /*
0535  * Remap block n of partition p to block n+start(p) of the disk.
0536  */
0537 static int blk_partition_remap(struct bio *bio)
0538 {
0539     struct block_device *p = bio->bi_bdev;
0540 
0541     if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
0542         return -EIO;
0543     if (bio_sectors(bio)) {
0544         bio->bi_iter.bi_sector += p->bd_start_sect;
0545         trace_block_bio_remap(bio, p->bd_dev,
0546                       bio->bi_iter.bi_sector -
0547                       p->bd_start_sect);
0548     }
0549     bio_set_flag(bio, BIO_REMAPPED);
0550     return 0;
0551 }
0552 
0553 /*
0554  * Check write append to a zoned block device.
0555  */
0556 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
0557                          struct bio *bio)
0558 {
0559     int nr_sectors = bio_sectors(bio);
0560 
0561     /* Only applicable to zoned block devices */
0562     if (!bdev_is_zoned(bio->bi_bdev))
0563         return BLK_STS_NOTSUPP;
0564 
0565     /* The bio sector must point to the start of a sequential zone */
0566     if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
0567         !bio_zone_is_seq(bio))
0568         return BLK_STS_IOERR;
0569 
0570     /*
0571      * Not allowed to cross zone boundaries. Otherwise, the BIO will be
0572      * split and could result in non-contiguous sectors being written in
0573      * different zones.
0574      */
0575     if (nr_sectors > q->limits.chunk_sectors)
0576         return BLK_STS_IOERR;
0577 
0578     /* Make sure the BIO is small enough and will not get split */
0579     if (nr_sectors > q->limits.max_zone_append_sectors)
0580         return BLK_STS_IOERR;
0581 
0582     bio->bi_opf |= REQ_NOMERGE;
0583 
0584     return BLK_STS_OK;
0585 }
0586 
0587 static void __submit_bio(struct bio *bio)
0588 {
0589     struct gendisk *disk = bio->bi_bdev->bd_disk;
0590 
0591     if (unlikely(!blk_crypto_bio_prep(&bio)))
0592         return;
0593 
0594     if (!disk->fops->submit_bio) {
0595         blk_mq_submit_bio(bio);
0596     } else if (likely(bio_queue_enter(bio) == 0)) {
0597         disk->fops->submit_bio(bio);
0598         blk_queue_exit(disk->queue);
0599     }
0600 }
0601 
0602 /*
0603  * The loop in this function may be a bit non-obvious, and so deserves some
0604  * explanation:
0605  *
0606  *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
0607  *    that), so we have a list with a single bio.
0608  *  - We pretend that we have just taken it off a longer list, so we assign
0609  *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
0610  *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
0611  *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
0612  *    non-NULL value in bio_list and re-enter the loop from the top.
0613  *  - In this case we really did just take the bio off the top of the list (no
0614  *    pretending) and so remove it from bio_list, and call into ->submit_bio()
0615  *    again.
0616  *
0617  * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
0618  * bio_list_on_stack[1] contains bios that were submitted before the current
0619  *  ->submit_bio, but that haven't been processed yet.
0620  */
0621 static void __submit_bio_noacct(struct bio *bio)
0622 {
0623     struct bio_list bio_list_on_stack[2];
0624 
0625     BUG_ON(bio->bi_next);
0626 
0627     bio_list_init(&bio_list_on_stack[0]);
0628     current->bio_list = bio_list_on_stack;
0629 
0630     do {
0631         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
0632         struct bio_list lower, same;
0633 
0634         /*
0635          * Create a fresh bio_list for all subordinate requests.
0636          */
0637         bio_list_on_stack[1] = bio_list_on_stack[0];
0638         bio_list_init(&bio_list_on_stack[0]);
0639 
0640         __submit_bio(bio);
0641 
0642         /*
0643          * Sort new bios into those for a lower level and those for the
0644          * same level.
0645          */
0646         bio_list_init(&lower);
0647         bio_list_init(&same);
0648         while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
0649             if (q == bdev_get_queue(bio->bi_bdev))
0650                 bio_list_add(&same, bio);
0651             else
0652                 bio_list_add(&lower, bio);
0653 
0654         /*
0655          * Now assemble so we handle the lowest level first.
0656          */
0657         bio_list_merge(&bio_list_on_stack[0], &lower);
0658         bio_list_merge(&bio_list_on_stack[0], &same);
0659         bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
0660     } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
0661 
0662     current->bio_list = NULL;
0663 }
0664 
0665 static void __submit_bio_noacct_mq(struct bio *bio)
0666 {
0667     struct bio_list bio_list[2] = { };
0668 
0669     current->bio_list = bio_list;
0670 
0671     do {
0672         __submit_bio(bio);
0673     } while ((bio = bio_list_pop(&bio_list[0])));
0674 
0675     current->bio_list = NULL;
0676 }
0677 
0678 void submit_bio_noacct_nocheck(struct bio *bio)
0679 {
0680     /*
0681      * We only want one ->submit_bio to be active at a time, else stack
0682      * usage with stacked devices could be a problem.  Use current->bio_list
0683      * to collect a list of requests submitted by a ->submit_bio method while
0684      * it is active, and then process them after it has returned.
0685      */
0686     if (current->bio_list)
0687         bio_list_add(&current->bio_list[0], bio);
0688     else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
0689         __submit_bio_noacct_mq(bio);
0690     else
0691         __submit_bio_noacct(bio);
0692 }
0693 
0694 /**
0695  * submit_bio_noacct - re-submit a bio to the block device layer for I/O
0696  * @bio:  The bio describing the location in memory and on the device.
0697  *
0698  * This is a version of submit_bio() that shall only be used for I/O that is
0699  * resubmitted to lower level drivers by stacking block drivers.  All file
0700  * systems and other upper level users of the block layer should use
0701  * submit_bio() instead.
0702  */
0703 void submit_bio_noacct(struct bio *bio)
0704 {
0705     struct block_device *bdev = bio->bi_bdev;
0706     struct request_queue *q = bdev_get_queue(bdev);
0707     blk_status_t status = BLK_STS_IOERR;
0708     struct blk_plug *plug;
0709 
0710     might_sleep();
0711 
0712     plug = blk_mq_plug(bio);
0713     if (plug && plug->nowait)
0714         bio->bi_opf |= REQ_NOWAIT;
0715 
0716     /*
0717      * For a REQ_NOWAIT based request, return -EOPNOTSUPP
0718      * if the queue does not support NOWAIT.
0719      */
0720     if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
0721         goto not_supported;
0722 
0723     if (should_fail_bio(bio))
0724         goto end_io;
0725     if (unlikely(bio_check_ro(bio)))
0726         goto end_io;
0727     if (!bio_flagged(bio, BIO_REMAPPED)) {
0728         if (unlikely(bio_check_eod(bio)))
0729             goto end_io;
0730         if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
0731             goto end_io;
0732     }
0733 
0734     /*
0735      * Filter flush bios early so that bio based drivers without flush
0736      * support don't have to worry about them.
0737      */
0738     if (op_is_flush(bio->bi_opf) &&
0739         !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
0740         bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
0741         if (!bio_sectors(bio)) {
0742             status = BLK_STS_OK;
0743             goto end_io;
0744         }
0745     }
0746 
0747     if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
0748         bio_clear_polled(bio);
0749 
0750     switch (bio_op(bio)) {
0751     case REQ_OP_DISCARD:
0752         if (!bdev_max_discard_sectors(bdev))
0753             goto not_supported;
0754         break;
0755     case REQ_OP_SECURE_ERASE:
0756         if (!bdev_max_secure_erase_sectors(bdev))
0757             goto not_supported;
0758         break;
0759     case REQ_OP_ZONE_APPEND:
0760         status = blk_check_zone_append(q, bio);
0761         if (status != BLK_STS_OK)
0762             goto end_io;
0763         break;
0764     case REQ_OP_ZONE_RESET:
0765     case REQ_OP_ZONE_OPEN:
0766     case REQ_OP_ZONE_CLOSE:
0767     case REQ_OP_ZONE_FINISH:
0768         if (!bdev_is_zoned(bio->bi_bdev))
0769             goto not_supported;
0770         break;
0771     case REQ_OP_ZONE_RESET_ALL:
0772         if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
0773             goto not_supported;
0774         break;
0775     case REQ_OP_WRITE_ZEROES:
0776         if (!q->limits.max_write_zeroes_sectors)
0777             goto not_supported;
0778         break;
0779     default:
0780         break;
0781     }
0782 
0783     if (blk_throtl_bio(bio))
0784         return;
0785 
0786     blk_cgroup_bio_start(bio);
0787     blkcg_bio_issue_init(bio);
0788 
0789     if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
0790         trace_block_bio_queue(bio);
0791         /* Now that enqueuing has been traced, we need to trace
0792          * completion as well.
0793          */
0794         bio_set_flag(bio, BIO_TRACE_COMPLETION);
0795     }
0796     submit_bio_noacct_nocheck(bio);
0797     return;
0798 
0799 not_supported:
0800     status = BLK_STS_NOTSUPP;
0801 end_io:
0802     bio->bi_status = status;
0803     bio_endio(bio);
0804 }
0805 EXPORT_SYMBOL(submit_bio_noacct);
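
/*
 * Editorial example (not part of the original file): a sketch of how a
 * stacking, bio based driver typically ends up in submit_bio_noacct() from
 * its own ->submit_bio method after remapping the bio to a lower device.
 * The type "struct my_dev" and the helper "my_map_sector()" are hypothetical.
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		struct my_dev *d = bio->bi_bdev->bd_disk->private_data;
 *
 *		bio_set_dev(bio, d->lower_bdev);
 *		bio->bi_iter.bi_sector = my_map_sector(d, bio->bi_iter.bi_sector);
 *		submit_bio_noacct(bio);
 *	}
 */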
0806 
0807 /**
0808  * submit_bio - submit a bio to the block device layer for I/O
0809  * @bio: The &struct bio which describes the I/O
0810  *
0811  * submit_bio() is used to submit I/O requests to block devices.  It is passed a
0812  * fully set up &struct bio that describes the I/O that needs to be done.  The
0813  * bio will be sent to the device described by the bi_bdev field.
0814  *
0815  * The success/failure status of the request, along with notification of
0816  * completion, is delivered asynchronously through the ->bi_end_io() callback
0817  * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
0818  * been called.
0819  */
0820 void submit_bio(struct bio *bio)
0821 {
0822     if (blkcg_punt_bio_submit(bio))
0823         return;
0824 
0825     if (bio_op(bio) == REQ_OP_READ) {
0826         task_io_account_read(bio->bi_iter.bi_size);
0827         count_vm_events(PGPGIN, bio_sectors(bio));
0828     } else if (bio_op(bio) == REQ_OP_WRITE) {
0829         count_vm_events(PGPGOUT, bio_sectors(bio));
0830     }
0831 
0832     /*
0833      * If we're reading data that is part of the userspace workingset, count
0834      * submission time as memory stall.  When the device is congested, or
0835      * the submitting cgroup is IO-throttled, submission can be a significant
0836      * part of overall IO time.
0837      */
0838     if (unlikely(bio_op(bio) == REQ_OP_READ &&
0839         bio_flagged(bio, BIO_WORKINGSET))) {
0840         unsigned long pflags;
0841 
0842         psi_memstall_enter(&pflags);
0843         submit_bio_noacct(bio);
0844         psi_memstall_leave(&pflags);
0845         return;
0846     }
0847 
0848     submit_bio_noacct(bio);
0849 }
0850 EXPORT_SYMBOL(submit_bio);
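
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * building one bio and submitting it, with completion delivered through
 * ->bi_end_io as described above.  "my_end_io", "bdev", "sector" and "page"
 * are assumptions supplied by the caller.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		blk_status_t status = bio->bi_status;
 *
 *		bio_put(bio);
 *		... record status, complete a waiter, etc ...
 *	}
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */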
0851 
0852 /**
0853  * bio_poll - poll for BIO completions
0854  * @bio: bio to poll for
0855  * @iob: batches of IO
0856  * @flags: BLK_POLL_* flags that control the behavior
0857  *
0858  * Poll for completions on queue associated with the bio. Returns number of
0859  * completed entries found.
0860  *
0861  * Note: the caller must either be the context that submitted @bio, or
0862  * be in an RCU critical section to prevent freeing of @bio.
0863  */
0864 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
0865 {
0866     struct request_queue *q = bdev_get_queue(bio->bi_bdev);
0867     blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
0868     int ret = 0;
0869 
0870     if (cookie == BLK_QC_T_NONE ||
0871         !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
0872         return 0;
0873 
0874     blk_flush_plug(current->plug, false);
0875 
0876     if (bio_queue_enter(bio))
0877         return 0;
0878     if (queue_is_mq(q)) {
0879         ret = blk_mq_poll(q, cookie, iob, flags);
0880     } else {
0881         struct gendisk *disk = q->disk;
0882 
0883         if (disk && disk->fops->poll_bio)
0884             ret = disk->fops->poll_bio(bio, iob, flags);
0885     }
0886     blk_queue_exit(q);
0887     return ret;
0888 }
0889 EXPORT_SYMBOL_GPL(bio_poll);
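
/*
 * Editorial example (not part of the original file): a sketch of a
 * synchronous polled completion loop.  It assumes the bio was submitted with
 * REQ_POLLED on a queue that has QUEUE_FLAG_POLL set, and that "done" is a
 * hypothetical flag set from the bio's ->bi_end_io handler.
 *
 *	submit_bio(bio);
 *	while (!READ_ONCE(done)) {
 *		if (!bio_poll(bio, NULL, 0))
 *			cond_resched();
 *	}
 */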
0890 
0891 /*
0892  * Helper to implement file_operations.iopoll.  Requires the bio to be stored
0893  * in iocb->private, and cleared before freeing the bio.
0894  */
0895 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
0896             unsigned int flags)
0897 {
0898     struct bio *bio;
0899     int ret = 0;
0900 
0901     /*
0902      * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
0903      * point to a freshly allocated bio at this point.  If that happens
0904      * we have a few cases to consider:
0905      *
0906      *  1) the bio is being initialized and bi_bdev is NULL.  We can just
0907      *     do nothing in this case
0908      *  2) the bio points to a device that is not poll enabled.  bio_poll will catch
0909      *     this and return 0
0910      *  3) the bio points to a poll capable device, including but not
0911      *     limited to the one that the original bio pointed to.  In this
0912      *     case we will call into the actual poll method and poll for I/O,
0913      *     even if we don't need to, but it won't cause harm either.
0914      *
0915      * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
0916      * is still allocated. Because partitions hold a reference to the whole
0917      * device bdev and thus disk, the disk is also still valid.  Grabbing
0918      * a reference to the queue in bio_poll() ensures the hctxs and requests
0919      * are still valid as well.
0920      */
0921     rcu_read_lock();
0922     bio = READ_ONCE(kiocb->private);
0923     if (bio && bio->bi_bdev)
0924         ret = bio_poll(bio, iob, flags);
0925     rcu_read_unlock();
0926 
0927     return ret;
0928 }
0929 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
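
/*
 * Editorial example (not part of the original file): how a driver that keeps
 * its in-flight bio in iocb->private can wire up polling, roughly following
 * the pattern used by block/fops.c.  "my_read_iter" and "my_write_iter" are
 * hypothetical.
 *
 *	static const struct file_operations my_fops = {
 *		.read_iter	= my_read_iter,
 *		.write_iter	= my_write_iter,
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 *
 * The submission path stores the bio before submitting it:
 *
 *	WRITE_ONCE(iocb->private, bio);
 *	submit_bio(bio);
 *
 * and clears iocb->private again before the bio is freed on completion.
 */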
0930 
0931 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
0932 {
0933     unsigned long stamp;
0934 again:
0935     stamp = READ_ONCE(part->bd_stamp);
0936     if (unlikely(time_after(now, stamp))) {
0937         if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
0938             __part_stat_add(part, io_ticks, end ? now - stamp : 1);
0939     }
0940     if (part->bd_partno) {
0941         part = bdev_whole(part);
0942         goto again;
0943     }
0944 }
0945 
0946 unsigned long bdev_start_io_acct(struct block_device *bdev,
0947                  unsigned int sectors, enum req_op op,
0948                  unsigned long start_time)
0949 {
0950     const int sgrp = op_stat_group(op);
0951 
0952     part_stat_lock();
0953     update_io_ticks(bdev, start_time, false);
0954     part_stat_inc(bdev, ios[sgrp]);
0955     part_stat_add(bdev, sectors[sgrp], sectors);
0956     part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
0957     part_stat_unlock();
0958 
0959     return start_time;
0960 }
0961 EXPORT_SYMBOL(bdev_start_io_acct);
0962 
0963 /**
0964  * bio_start_io_acct_time - start I/O accounting for bio based drivers
0965  * @bio:    bio to start account for
0966  * @start_time: start time that should be passed back to bio_end_io_acct().
0967  */
0968 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
0969 {
0970     bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
0971                bio_op(bio), start_time);
0972 }
0973 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
0974 
0975 /**
0976  * bio_start_io_acct - start I/O accounting for bio based drivers
0977  * @bio:    bio to start account for
0978  *
0979  * Returns the start time that should be passed back to bio_end_io_acct().
0980  */
0981 unsigned long bio_start_io_acct(struct bio *bio)
0982 {
0983     return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
0984                   bio_op(bio), jiffies);
0985 }
0986 EXPORT_SYMBOL_GPL(bio_start_io_acct);
0987 
0988 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
0989               unsigned long start_time)
0990 {
0991     const int sgrp = op_stat_group(op);
0992     unsigned long now = READ_ONCE(jiffies);
0993     unsigned long duration = now - start_time;
0994 
0995     part_stat_lock();
0996     update_io_ticks(bdev, now, true);
0997     part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
0998     part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
0999     part_stat_unlock();
1000 }
1001 EXPORT_SYMBOL(bdev_end_io_acct);
1002 
1003 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1004                   struct block_device *orig_bdev)
1005 {
1006     bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
1007 }
1008 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
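
/*
 * Editorial example (not part of the original file): the usual accounting
 * pattern in a bio based driver that completes bios itself.  A sketch, where
 * "start" is stashed in some per-I/O context between submission and
 * completion:
 *
 *	start = bio_start_io_acct(bio);
 *	... process the bio ...
 *	bio_end_io_acct(bio, start);
 *	bio_endio(bio);
 */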
1009 
1010 /**
1011  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1012  * @q : the queue of the device being checked
1013  *
1014  * Description:
1015  *    Check if underlying low-level drivers of a device are busy.
1016  *    If a driver wants to export its busy state, it must implement the
1017  *    ->busy method in its struct blk_mq_ops first.
1018  *
1019  *    Basically, this function is used only by request stacking drivers
1020  *    to stop dispatching requests to underlying devices when underlying
1021  *    devices are busy.  This behavior helps more I/O merging on the queue
1022  *    of the request stacking driver and prevents I/O throughput regression
1023  *    on burst I/O load.
1024  *
1025  * Return:
1026  *    0 - Not busy (The request stacking driver should dispatch request)
1027  *    1 - Busy (The request stacking driver should stop dispatching request)
1028  */
1029 int blk_lld_busy(struct request_queue *q)
1030 {
1031     if (queue_is_mq(q) && q->mq_ops->busy)
1032         return q->mq_ops->busy(q);
1033 
1034     return 0;
1035 }
1036 EXPORT_SYMBOL_GPL(blk_lld_busy);
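
/*
 * Editorial example (not part of the original file): a request stacking
 * driver might consult blk_lld_busy() when deciding whether to dispatch to
 * an underlying device.  A sketch, where "lower_q" is a hypothetical
 * request_queue of one of the underlying devices:
 *
 *	if (blk_lld_busy(lower_q))
 *		return BLK_STS_RESOURCE;
 *	... dispatch the clone request to lower_q ...
 */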
1037 
1038 int kblockd_schedule_work(struct work_struct *work)
1039 {
1040     return queue_work(kblockd_workqueue, work);
1041 }
1042 EXPORT_SYMBOL(kblockd_schedule_work);
1043 
1044 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1045                 unsigned long delay)
1046 {
1047     return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1048 }
1049 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1050 
1051 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1052 {
1053     struct task_struct *tsk = current;
1054 
1055     /*
1056      * If this is a nested plug, don't actually assign it.
1057      */
1058     if (tsk->plug)
1059         return;
1060 
1061     plug->mq_list = NULL;
1062     plug->cached_rq = NULL;
1063     plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1064     plug->rq_count = 0;
1065     plug->multiple_queues = false;
1066     plug->has_elevator = false;
1067     plug->nowait = false;
1068     INIT_LIST_HEAD(&plug->cb_list);
1069 
1070     /*
1071      * Store ordering should not be needed here, since a potential
1072      * preempt will imply a full memory barrier
1073      */
1074     tsk->plug = plug;
1075 }
1076 
1077 /**
1078  * blk_start_plug - initialize blk_plug and track it inside the task_struct
1079  * @plug:   The &struct blk_plug that needs to be initialized
1080  *
1081  * Description:
1082  *   blk_start_plug() indicates to the block layer an intent by the caller
1083  *   to submit multiple I/O requests in a batch.  The block layer may use
1084  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1085  *   is called.  However, the block layer may choose to submit requests
1086  *   before a call to blk_finish_plug() if the number of queued I/Os
1087  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1088  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1089  *   the task schedules (see below).
1090  *
1091  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1092  *   pending I/O should the task end up blocking between blk_start_plug() and
1093  *   blk_finish_plug(). This is important from a performance perspective, but
1094  *   also ensures that we don't deadlock. For instance, if the task is blocking
1095  *   for a memory allocation, memory reclaim could end up wanting to free a
1096  *   page belonging to that request that is currently residing in our private
1097  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1098  *   this kind of deadlock.
1099  */
1100 void blk_start_plug(struct blk_plug *plug)
1101 {
1102     blk_start_plug_nr_ios(plug, 1);
1103 }
1104 EXPORT_SYMBOL(blk_start_plug);
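
/*
 * Editorial example (not part of the original file): the canonical plugging
 * pattern described above.  A sketch, assuming the caller has "nr" bios ready
 * to submit:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * Anything still held in the plug is flushed by blk_finish_plug(), or earlier
 * if the task blocks in between.
 */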
1105 
1106 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1107 {
1108     LIST_HEAD(callbacks);
1109 
1110     while (!list_empty(&plug->cb_list)) {
1111         list_splice_init(&plug->cb_list, &callbacks);
1112 
1113         while (!list_empty(&callbacks)) {
1114             struct blk_plug_cb *cb = list_first_entry(&callbacks,
1115                               struct blk_plug_cb,
1116                               list);
1117             list_del(&cb->list);
1118             cb->callback(cb, from_schedule);
1119         }
1120     }
1121 }
1122 
1123 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1124                       int size)
1125 {
1126     struct blk_plug *plug = current->plug;
1127     struct blk_plug_cb *cb;
1128 
1129     if (!plug)
1130         return NULL;
1131 
1132     list_for_each_entry(cb, &plug->cb_list, list)
1133         if (cb->callback == unplug && cb->data == data)
1134             return cb;
1135 
1136     /* Not currently on the callback list */
1137     BUG_ON(size < sizeof(*cb));
1138     cb = kzalloc(size, GFP_ATOMIC);
1139     if (cb) {
1140         cb->data = data;
1141         cb->callback = unplug;
1142         list_add(&cb->list, &plug->cb_list);
1143     }
1144     return cb;
1145 }
1146 EXPORT_SYMBOL(blk_check_plugged);
1147 
1148 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1149 {
1150     if (!list_empty(&plug->cb_list))
1151         flush_plug_callbacks(plug, from_schedule);
1152     if (!rq_list_empty(plug->mq_list))
1153         blk_mq_flush_plug_list(plug, from_schedule);
1154     /*
1155      * Unconditionally flush out cached requests, even if the unplug
1156      * event came from schedule. Since we now hold references to the
1157      * queue for cached requests, we don't want a blocked task holding
1158      * up a queue freeze/quiesce event.
1159      */
1160     if (unlikely(!rq_list_empty(plug->cached_rq)))
1161         blk_mq_free_plug_rqs(plug);
1162 }
1163 
1164 /**
1165  * blk_finish_plug - mark the end of a batch of submitted I/O
1166  * @plug:   The &struct blk_plug passed to blk_start_plug()
1167  *
1168  * Description:
1169  * Indicate that a batch of I/O submissions is complete.  This function
1170  * must be paired with an initial call to blk_start_plug().  The intent
1171  * is to allow the block layer to optimize I/O submission.  See the
1172  * documentation for blk_start_plug() for more information.
1173  */
1174 void blk_finish_plug(struct blk_plug *plug)
1175 {
1176     if (plug == current->plug) {
1177         __blk_flush_plug(plug, false);
1178         current->plug = NULL;
1179     }
1180 }
1181 EXPORT_SYMBOL(blk_finish_plug);
1182 
1183 void blk_io_schedule(void)
1184 {
1185     /* Prevent hang_check timer from firing at us during very long I/O */
1186     unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1187 
1188     if (timeout)
1189         io_schedule_timeout(timeout);
1190     else
1191         io_schedule();
1192 }
1193 EXPORT_SYMBOL_GPL(blk_io_schedule);
1194 
1195 int __init blk_dev_init(void)
1196 {
1197     BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1198     BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1199             sizeof_field(struct request, cmd_flags));
1200     BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1201             sizeof_field(struct bio, bi_opf));
1202     BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
1203                __alignof__(struct request_queue)) !=
1204              sizeof(struct request_queue));
1205 
1206     /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1207     kblockd_workqueue = alloc_workqueue("kblockd",
1208                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1209     if (!kblockd_workqueue)
1210         panic("Failed to create kblockd\n");
1211 
1212     blk_requestq_cachep = kmem_cache_create("request_queue",
1213             sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1214 
1215     blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
1216             sizeof(struct request_queue) +
1217             sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
1218 
1219     blk_debugfs_root = debugfs_create_dir("block", NULL);
1220 
1221     return 0;
1222 }