// SPDX-License-Identifier: GPL-2.0
/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

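/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */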
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

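/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */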
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

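/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */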
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

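/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful in the debugging and tracing bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */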
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

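/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * The block layer may perform asynchronous callback activity on a queue,
 * such as calling the unplug function after a timeout.  A block device may
 * call blk_sync_queue to ensure that any such activity is cancelled, thus
 * allowing it to release resources that the callbacks might use.
 */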
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

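/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */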
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

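/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject.
 *
 * Context: Can sleep.
 */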
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

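/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */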
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need
		 * to order reading __PERCPU_REF_DEAD flag of .q_usage_counter
		 * and reading .mq_freeze_depth or queue dying flag, otherwise
		 * the following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need
		 * to order reading __PERCPU_REF_DEAD flag of .q_usage_counter
		 * and reading .mq_freeze_depth or queue dying flag, otherwise
		 * the following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

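/*
 * Allocate and initialize a struct request_queue. @node_id is the NUMA node
 * to allocate from; when @alloc_srcu is set the queue also gets an SRCU
 * structure for blocking ->queue_rq support.
 */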
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

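/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.  Returns false when
 * the queue is already dying, true otherwise.
 *
 * Context: Any context.
 */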
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	kobject_get(&q->kobj);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

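/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size
 * of the device, e.g., when mounting a filesystem.
 */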
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

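/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */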
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

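/*
 * Check write append to a zoned block device.
 */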
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!disk->fops->submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}

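/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some
 *    more bios through a recursive call to submit_bio_noacct.  If it did, we
 *    find a non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list
 *    (no pretending) and so remove it from bio_list, and call into
 *    ->submit_bio() again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */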
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for
		 * the same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

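/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower devices by stacking block device drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */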
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

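/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed
 * a fully set up &struct bio that describes the I/O that needs to be done.
 * The bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */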
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  If the device is congested, or the
	 * submitting cgroup IO-throttled, submission can be a significant part
	 * of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
		     bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

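/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */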
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret = 0;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	blk_flush_plug(current->plug, false);

	if (bio_queue_enter(bio))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

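/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in the iocb's private data, and the iocb to only have a single bio attached
 * to it, which is guaranteed if the iocb comes from the block layer direct
 * I/O code.
 */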
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can just
	 *     do nothing in this case
	 *  2) the bio points to a not poll enabled device.  bio_poll will
	 *     catch this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures that the hctxs and
	 * requests are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 * @start_time:	start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			   bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

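/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */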
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				  bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

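/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q:	the queue of the device being checked
 *
 * Check if underlying low-level drivers of a device are busy.  This is used
 * only by request stacking drivers to stop dispatching requests to underlying
 * devices when those devices are busy.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */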
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}

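/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules.
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().  This is important from a performance perspective, but
 *   also ensures that we don't deadlock, for instance if the task is blocking
 *   for a memory allocation and memory reclaim needs to write back a page
 *   that is sitting in our private plug.
 */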
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule.  Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * on to them.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

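/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */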
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}