#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

struct bio_alloc_cache {
	struct bio	*free_list;
	unsigned int	nr;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

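/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */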
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

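/*
 * Our slab pool management: one kmem_cache per distinct bio size
 * (front_pad + sizeof(struct bio) + back_pad), refcounted in the
 * bio_slabs xarray.
 */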
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

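/*
 * Make the initial bvec allocation restricted and don't dump info on
 * allocation failures, since we'll fall back to the mempool in case of
 * failure.
 */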
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the
	 * allocation.  The bvec_free path relies on this as well.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set, retry with the mempool,
	 * which is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p = bio;

	WARN_ON_ONCE(!bs);

	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

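/**
 * bio_init - initialize a bio instance
 * @bio:	bio to initialize
 * @bdev:	block device to associate the bio with (may be NULL)
 * @table:	biovec table to use (may be NULL)
 * @max_vecs:	number of entries in @table
 * @opf:	operation and flags for the bio
 *
 * All fields of @bio are reinitialized.  If @bdev is set, the bio is also
 * associated with the current blk-cgroup.
 */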
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);

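/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * After calling bio_reset(), @bio will be in the same state as a freshly
 * allocated bio: only the first BIO_RESET_BYTES of the structure are
 * cleared, so the fields beyond that point (see struct bio) are preserved.
 */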
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

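/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */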
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;

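	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */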
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
		struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (!cache->free_list) {
		put_cpu();
		return NULL;
	}
	bio = cache->free_list;
	cache->free_list = bio->bi_next;
	cache->nr--;
	put_cpu();

	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
	bio->bi_pool = bs;
	return bio;
}

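/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.  If %__GFP_DIRECT_RECLAIM is set
 * the allocation is allowed to block and is backed by the mempool guarantees,
 * so callers must never allocate more than one bio at a time from the same
 * bio_set without submitting the previous one, or they risk deadlock under
 * memory pressure.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */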
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	if (opf & REQ_ALLOC_CACHE) {
		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
						     gfp_mask, bs);
			if (bio)
				return bio;
			/*
			 * No cached bio available: fall through to the
			 * regular allocation path below.  The returned bio
			 * keeps REQ_ALLOC_CACHE set so that it is recycled
			 * into the per-cpu cache on bio_put().
			 */
		} else {
			opf &= ~REQ_ALLOC_CACHE;
		}
	}

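	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not
	 * be submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple
	 * bios from the same bio_set while running underneath
	 * submit_bio_noacct().  To guarantee forward progress each bio_set
	 * has a rescuer workqueue: if bios are pending on current->bio_list,
	 * first try the allocation without __GFP_DIRECT_RECLAIM, and on
	 * failure punt the pending bios to the rescuer before retrying with
	 * the original gfp flags.
	 */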
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

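/**
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs:	number of bio_vecs to allocate
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 *
 * Use kmalloc to allocate a bio (including bvecs).  The bio must be
 * initialized using bio_init() before use.  To free a bio returned from
 * this function use kfree() after calling bio_uninit().  A bio returned
 * from this function can not be put into a bioset and is never freed via
 * bio_put().  bio_kmalloc() is limited to UIO_MAXIOV vecs.
 */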
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_vecs > UIO_MAXIOV)
		return NULL;
	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);

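/**
 * bio_truncate - truncate the bio to small size of @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Truncate the bio to the new size of @new_size.  If bio_op(bio) is
 * REQ_OP_READ, zero the truncated part of the attached pages.  This helper
 * should only be used for handling corner cases, such as bio eod.
 */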
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

exit:
	/*
	 * Don't touch the bvec table here: filesystems retrieve all pages
	 * via bio_for_each_segment_all() in their .end_io() callback, so
	 * the table has to be kept exactly as it was at submission time.
	 */
	bio->bi_iter.bi_size = new_size;
}

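/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if
 * the block size is some multiple of the physical sector size.  The bio is
 * truncated to the size of the device; an IO that lies entirely beyond the
 * end of the device is left alone and will be failed by the block layer.
 */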
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

#define ALLOC_CACHE_MAX		512
#define ALLOC_CACHE_SLACK	64

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
	bs->cache = NULL;
}

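/**
 * bio_put - release a reference to a bio
 * @bio:	bio to release reference to
 *
 * The last put of a bio will free it, or return it to the per-cpu alloc
 * cache if the bio was allocated with REQ_ALLOC_CACHE.
 */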
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}

	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		struct bio_alloc_cache *cache;

		bio_uninit(bio);
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	} else {
		bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_iter = bio_src->bi_iter;

	if (bio->bi_bdev) {
		if (bio->bi_bdev == bio_src->bi_bdev &&
		    bio_flagged(bio_src, BIO_REMAPPED))
			bio_set_flag(bio, BIO_REMAPPED);
		bio_clone_blkg_association(bio, bio_src);
	}

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

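/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src.  The caller owns the
 * returned bio, but not the actual data it points to; the clone shares
 * @bio_src's biovec table, so @bio_src must not be freed before the clone.
 */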
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);

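/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the bio, but not the actual data it points to, so @bio_src
 * must not be freed before @bio.
 */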
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/*
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

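/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Return %true on success or %false on failure.
 */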
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

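/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */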
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(&q->limits, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

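/**
 * bio_add_pc_page - attempt to add a page to a passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist.  This can fail for a number
 * of reasons, such as the bio being full or the target block device
 * limitations.  This should only be used by passthrough bios.
 */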
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;

	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

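/**
 * bio_add_zone_append_page - attempt to add a page to a zone append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to a bio that will be submitted as a zone append
 * request.  The target block device must be a zoned block device and the
 * bio must be a REQ_OP_ZONE_APPEND bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */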
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

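/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */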
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

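/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist.  This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */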
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);

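/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() directly.
 *
 * Return: Whether the addition was successful.
 */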
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return false;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		__bio_add_page(bio, page, len, offset);
		return 0;
	}

	if (same_page)
		put_page(page);
	return 0;
}

static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (bio_add_hw_page(q, bio, page, len, offset,
			queue_max_zone_append_sectors(q), &same_page) != len)
		return -EINVAL;
	if (same_page)
		put_page(page);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

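/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array.  The pages
 * will have to be released using put_page() when done.  This function only
 * adds as many pages as fit into the remaining bvec slots of @bio.
 */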
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i = 0;
	size_t offset, trim;
	int ret = 0;

	/*
	 * Move the page array up in the allocated memory for the bio vecs as
	 * far as possible so that we can start filling biovecs from the
	 * beginning without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	/*
	 * The total added size must be a multiple of the logical block size,
	 * so trim any partial tail and revert the iterator by that amount;
	 * the remainder is picked up again on the next iteration.
	 */
	size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
				   nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);

	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
	iov_iter_revert(iter, trim);

	size -= trim;
	if (unlikely(!size)) {
		ret = -EFAULT;
		goto out;
	}

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			ret = bio_iov_add_zone_append_page(bio, page, len,
					offset);
			if (ret)
				break;
		} else
			bio_iov_add_page(bio, page, len, offset);

		offset = 0;
	}

	iov_iter_revert(iter, left);
out:
	while (i < nr_pages)
		put_page(pages[i++]);

	return ret;
}

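/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * For bvec based iterators the bio references the iterator's bvec array
 * directly and is flagged BIO_NO_PAGE_REF; the caller must keep those bvecs
 * and pages alive until the I/O completes.  Otherwise pages are pinned and
 * added to the bio, and must be released (e.g. via bio_release_pages()) on
 * completion.  BIO_WORKINGSET is cleared so that direct I/O is not accounted
 * as a memory stall.
 *
 * Returns 0 if at least one page was added, otherwise the error from the
 * last failed attempt.
 */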
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	do {
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

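/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio().  Returns 0 on success, or the errno
 * derived from bio->bi_status on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume a reference to @bio; the caller must drop its own reference.
 */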
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);

void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf = bvec_kmap_local(&src_bv);
		void *dst_buf = bvec_kmap_local(&dst_bv);

		memcpy(dst_buf, src_buf, bytes);

		kunmap_local(dst_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

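/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @dst: destination bio
 * @src: source bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_iter.bi_size, dst->bi_iter.bi_size) bytes.
 */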
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

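/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * Page dirtying cannot safely be done from the interrupt context in which
 * bios complete, so pages are marked dirty _before_ performing IO.  On IO
 * completion, bio_check_pages_dirty() verifies that the pages are still
 * dirty; any that were cleaned in the meantime are re-dirtied from process
 * context via a workqueue.
 *
 * Compound pages are skipped; the dirty tracking here does not handle them.
 */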
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

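/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * bio_endio() will end I/O on the whole bio.  It is the preferred way to end
 * I/O on a bio; nobody should call bi_end_io() directly on a bio unless they
 * own it and know that it has an end_io function.
 *
 * bio_endio() can be called several times on a bio that has been chained
 * using bio_chain().  The ->bi_end_io() function will only be called the
 * last time.
 */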
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	rq_qos_done_bio(bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack, so we complete chained parents
	 * iteratively here instead of via a tail call.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

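/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
 * responsibility to ensure that @bio is not freed before the split.
 */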
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);

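/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */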
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio_sectors(bio)))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

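/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS,
 *		%BIOSET_NEED_RESCUER and %BIOSET_PERCPU_CACHE
 *
 * Set up a bio_set to be used with bio_alloc_bioset().  @front_pad is useful
 * for embedding the bio inside another structure.  %BIOSET_NEED_BVECS sets up
 * a pool for biovec allocations, %BIOSET_NEED_RESCUER creates a workqueue
 * used to punt queued bios when the mempool runs dry, and
 * %BIOSET_PERCPU_CACHE enables the per-cpu bio cache used with
 * %REQ_ALLOC_CACHE allocations.
 */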
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
							WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);

static int __init init_bio(void)
{
	int i;

	bio_integrity_init();

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);