0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
0004  */
0005 #ifndef __LINUX_BIO_H
0006 #define __LINUX_BIO_H
0007 
0008 #include <linux/mempool.h>
0009 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
0010 #include <linux/blk_types.h>
0011 #include <linux/uio.h>
0012 
0013 #define BIO_MAX_VECS        256U
0014 
0015 static inline unsigned int bio_max_segs(unsigned int nr_segs)
0016 {
0017     return min(nr_segs, BIO_MAX_VECS);
0018 }
0019 
0020 #define bio_prio(bio)           (bio)->bi_ioprio
0021 #define bio_set_prio(bio, prio)     ((bio)->bi_ioprio = prio)
0022 
0023 #define bio_iter_iovec(bio, iter)               \
0024     bvec_iter_bvec((bio)->bi_io_vec, (iter))
0025 
0026 #define bio_iter_page(bio, iter)                \
0027     bvec_iter_page((bio)->bi_io_vec, (iter))
0028 #define bio_iter_len(bio, iter)                 \
0029     bvec_iter_len((bio)->bi_io_vec, (iter))
0030 #define bio_iter_offset(bio, iter)              \
0031     bvec_iter_offset((bio)->bi_io_vec, (iter))
0032 
0033 #define bio_page(bio)       bio_iter_page((bio), (bio)->bi_iter)
0034 #define bio_offset(bio)     bio_iter_offset((bio), (bio)->bi_iter)
0035 #define bio_iovec(bio)      bio_iter_iovec((bio), (bio)->bi_iter)
0036 
0037 #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
0038 #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
0039 
0040 #define bio_sectors(bio)    bvec_iter_sectors((bio)->bi_iter)
0041 #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
0042 
0043 /*
0044  * Return the data direction, READ or WRITE.
0045  */
0046 #define bio_data_dir(bio) \
0047     (op_is_write(bio_op(bio)) ? WRITE : READ)
0048 
0049 /*
0050  * Check whether this bio carries any data or not. A NULL bio is allowed.
0051  */
0052 static inline bool bio_has_data(struct bio *bio)
0053 {
0054     if (bio &&
0055         bio->bi_iter.bi_size &&
0056         bio_op(bio) != REQ_OP_DISCARD &&
0057         bio_op(bio) != REQ_OP_SECURE_ERASE &&
0058         bio_op(bio) != REQ_OP_WRITE_ZEROES)
0059         return true;
0060 
0061     return false;
0062 }
0063 
0064 static inline bool bio_no_advance_iter(const struct bio *bio)
0065 {
0066     return bio_op(bio) == REQ_OP_DISCARD ||
0067            bio_op(bio) == REQ_OP_SECURE_ERASE ||
0068            bio_op(bio) == REQ_OP_WRITE_ZEROES;
0069 }
0070 
0071 static inline void *bio_data(struct bio *bio)
0072 {
0073     if (bio_has_data(bio))
0074         return page_address(bio_page(bio)) + bio_offset(bio);
0075 
0076     return NULL;
0077 }
0078 
0079 static inline bool bio_next_segment(const struct bio *bio,
0080                     struct bvec_iter_all *iter)
0081 {
0082     if (iter->idx >= bio->bi_vcnt)
0083         return false;
0084 
0085     bvec_advance(&bio->bi_io_vec[iter->idx], iter);
0086     return true;
0087 }
0088 
0089 /*
0090  * drivers should _never_ use the all version - the bio may have been split
0091  * before it got to the driver and the driver won't own all of it
0092  */
0093 #define bio_for_each_segment_all(bvl, bio, iter) \
0094     for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
0095 
0096 static inline void bio_advance_iter(const struct bio *bio,
0097                     struct bvec_iter *iter, unsigned int bytes)
0098 {
0099     iter->bi_sector += bytes >> 9;
0100 
0101     if (bio_no_advance_iter(bio))
0102         iter->bi_size -= bytes;
0103     else
0104         bvec_iter_advance(bio->bi_io_vec, iter, bytes);
0105         /* TODO: It is reasonable to complete bio with error here. */
0106 }
0107 
0108 /* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
0109 static inline void bio_advance_iter_single(const struct bio *bio,
0110                        struct bvec_iter *iter,
0111                        unsigned int bytes)
0112 {
0113     iter->bi_sector += bytes >> 9;
0114 
0115     if (bio_no_advance_iter(bio))
0116         iter->bi_size -= bytes;
0117     else
0118         bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
0119 }
0120 
0121 void __bio_advance(struct bio *, unsigned bytes);
0122 
0123 /**
0124  * bio_advance - increment/complete a bio by some number of bytes
0125  * @bio:    bio to advance
0126  * @nbytes: number of bytes to complete
0127  *
0128  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
0129  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
0130  * be updated on the last bvec as well.
0131  *
0132  * @bio will then represent the remaining, uncompleted portion of the io.
0133  */
0134 static inline void bio_advance(struct bio *bio, unsigned int nbytes)
0135 {
0136     if (nbytes == bio->bi_iter.bi_size) {
0137         bio->bi_iter.bi_size = 0;
0138         return;
0139     }
0140     __bio_advance(bio, nbytes);
0141 }
0142 
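/*
 * Usage sketch (not part of the upstream header): a driver completing an I/O
 * in pieces can advance the bio by the bytes finished so far and only end it
 * once nothing is left. my_chunk_done() and @done are hypothetical.
 *
 *	static void my_chunk_done(struct bio *bio, unsigned int done)
 *	{
 *		bio_advance(bio, done);		// consume @done bytes from the front
 *		if (!bio->bi_iter.bi_size)	// whole bio finished
 *			bio_endio(bio);
 *	}
 */
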
0143 #define __bio_for_each_segment(bvl, bio, iter, start)           \
0144     for (iter = (start);                        \
0145          (iter).bi_size &&                      \
0146         ((bvl = bio_iter_iovec((bio), (iter))), 1);     \
0147          bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
0148 
0149 #define bio_for_each_segment(bvl, bio, iter)                \
0150     __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
0151 
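/*
 * Usage sketch (not part of the upstream header): walking the single-page
 * segments of a bio. bio_for_each_segment() works on a private copy of
 * bi_iter, so the bio itself is not advanced; the total below simply ends up
 * equal to bio->bi_iter.bi_size.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 */
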
0152 #define __bio_for_each_bvec(bvl, bio, iter, start)      \
0153     for (iter = (start);                        \
0154          (iter).bi_size &&                      \
0155         ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
0156          bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
0157 
0158 /* iterate over multi-page bvec */
0159 #define bio_for_each_bvec(bvl, bio, iter)           \
0160     __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
0161 
0162 /*
0163  * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
0164  * same reasons as bio_for_each_segment_all().
0165  */
0166 #define bio_for_each_bvec_all(bvl, bio, i)      \
0167     for (i = 0, bvl = bio_first_bvec_all(bio);  \
0168          i < (bio)->bi_vcnt; i++, bvl++)
0169 
0170 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
0171 
0172 static inline unsigned bio_segments(struct bio *bio)
0173 {
0174     unsigned segs = 0;
0175     struct bio_vec bv;
0176     struct bvec_iter iter;
0177 
0178     /*
0179      * We special case discard/secure erase/write zeroes, because they
0180      * interpret bi_size differently:
0181      */
0182 
0183     switch (bio_op(bio)) {
0184     case REQ_OP_DISCARD:
0185     case REQ_OP_SECURE_ERASE:
0186     case REQ_OP_WRITE_ZEROES:
0187         return 0;
0188     default:
0189         break;
0190     }
0191 
0192     bio_for_each_segment(bv, bio, iter)
0193         segs++;
0194 
0195     return segs;
0196 }
0197 
0198 /*
0199  * Get a reference to a bio, so it won't disappear. The intended use is
0200  * something like:
0201  *
0202  * bio_get(bio);
0203  * submit_bio(bio);
0204  * if (bio->bi_flags ...)
0205  *  do_something
0206  * bio_put(bio);
0207  *
0208  * Without the bio_get(), the I/O could complete before submit_bio()
0209  * returns, and the bio would then be freed memory by the time
0210  * if (bio->bi_flags ...) runs.
0211  */
0212 static inline void bio_get(struct bio *bio)
0213 {
0214     bio->bi_flags |= (1 << BIO_REFFED);
0215     smp_mb__before_atomic();
0216     atomic_inc(&bio->__bi_cnt);
0217 }
0218 
0219 static inline void bio_cnt_set(struct bio *bio, unsigned int count)
0220 {
0221     if (count != 1) {
0222         bio->bi_flags |= (1 << BIO_REFFED);
0223         smp_mb();
0224     }
0225     atomic_set(&bio->__bi_cnt, count);
0226 }
0227 
0228 static inline bool bio_flagged(struct bio *bio, unsigned int bit)
0229 {
0230     return (bio->bi_flags & (1U << bit)) != 0;
0231 }
0232 
0233 static inline void bio_set_flag(struct bio *bio, unsigned int bit)
0234 {
0235     bio->bi_flags |= (1U << bit);
0236 }
0237 
0238 static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
0239 {
0240     bio->bi_flags &= ~(1U << bit);
0241 }
0242 
0243 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
0244 {
0245     WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
0246     return bio->bi_io_vec;
0247 }
0248 
0249 static inline struct page *bio_first_page_all(struct bio *bio)
0250 {
0251     return bio_first_bvec_all(bio)->bv_page;
0252 }
0253 
0254 static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
0255 {
0256     WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
0257     return &bio->bi_io_vec[bio->bi_vcnt - 1];
0258 }
0259 
0260 /**
0261  * struct folio_iter - State for iterating all folios in a bio.
0262  * @folio: The current folio we're iterating.  NULL after the last folio.
0263  * @offset: The byte offset within the current folio.
0264  * @length: The number of bytes in this iteration (will not cross folio
0265  *  boundary).
0266  */
0267 struct folio_iter {
0268     struct folio *folio;
0269     size_t offset;
0270     size_t length;
0271     /* private: for use by the iterator */
0272     struct folio *_next;
0273     size_t _seg_count;
0274     int _i;
0275 };
0276 
0277 static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
0278                    int i)
0279 {
0280     struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
0281 
0282     fi->folio = page_folio(bvec->bv_page);
0283     fi->offset = bvec->bv_offset +
0284             PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
0285     fi->_seg_count = bvec->bv_len;
0286     fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
0287     fi->_next = folio_next(fi->folio);
0288     fi->_i = i;
0289 }
0290 
0291 static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
0292 {
0293     fi->_seg_count -= fi->length;
0294     if (fi->_seg_count) {
0295         fi->folio = fi->_next;
0296         fi->offset = 0;
0297         fi->length = min(folio_size(fi->folio), fi->_seg_count);
0298         fi->_next = folio_next(fi->folio);
0299     } else if (fi->_i + 1 < bio->bi_vcnt) {
0300         bio_first_folio(fi, bio, fi->_i + 1);
0301     } else {
0302         fi->folio = NULL;
0303     }
0304 }
0305 
0306 /**
0307  * bio_for_each_folio_all - Iterate over each folio in a bio.
0308  * @fi: struct folio_iter which is updated for each folio.
0309  * @bio: struct bio to iterate over.
0310  */
0311 #define bio_for_each_folio_all(fi, bio)             \
0312     for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
0313 
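/*
 * Usage sketch (not part of the upstream header): iterating a bio folio by
 * folio, e.g. in a read completion handler. folio_mark_uptodate() comes from
 * <linux/pagemap.h> and is assumed to be available to the caller.
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio) {
 *		// fi.offset/fi.length describe the part of fi.folio covered
 *		if (!bio->bi_status)
 *			folio_mark_uptodate(fi.folio);
 *	}
 */
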
0314 enum bip_flags {
0315     BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
0316     BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
0317     BIP_CTRL_NOCHECK    = 1 << 2, /* disable HBA integrity checking */
0318     BIP_DISK_NOCHECK    = 1 << 3, /* disable disk integrity checking */
0319     BIP_IP_CHECKSUM     = 1 << 4, /* IP checksum */
0320 };
0321 
0322 /*
0323  * bio integrity payload
0324  */
0325 struct bio_integrity_payload {
0326     struct bio      *bip_bio;   /* parent bio */
0327 
0328     struct bvec_iter    bip_iter;
0329 
0330     unsigned short      bip_vcnt;   /* # of integrity bio_vecs */
0331     unsigned short      bip_max_vcnt;   /* integrity bio_vec slots */
0332     unsigned short      bip_flags;  /* control flags */
0333 
0334     struct bvec_iter    bio_iter;   /* for rewinding parent bio */
0335 
0336     struct work_struct  bip_work;   /* I/O completion */
0337 
0338     struct bio_vec      *bip_vec;
0339     struct bio_vec      bip_inline_vecs[];/* embedded bvec array */
0340 };
0341 
0342 #if defined(CONFIG_BLK_DEV_INTEGRITY)
0343 
0344 static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
0345 {
0346     if (bio->bi_opf & REQ_INTEGRITY)
0347         return bio->bi_integrity;
0348 
0349     return NULL;
0350 }
0351 
0352 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
0353 {
0354     struct bio_integrity_payload *bip = bio_integrity(bio);
0355 
0356     if (bip)
0357         return bip->bip_flags & flag;
0358 
0359     return false;
0360 }
0361 
0362 static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
0363 {
0364     return bip->bip_iter.bi_sector;
0365 }
0366 
0367 static inline void bip_set_seed(struct bio_integrity_payload *bip,
0368                 sector_t seed)
0369 {
0370     bip->bip_iter.bi_sector = seed;
0371 }
0372 
0373 #endif /* CONFIG_BLK_DEV_INTEGRITY */
0374 
0375 void bio_trim(struct bio *bio, sector_t offset, sector_t size);
0376 extern struct bio *bio_split(struct bio *bio, int sectors,
0377                  gfp_t gfp, struct bio_set *bs);
0378 
0379 /**
0380  * bio_next_split - get next @sectors from a bio, splitting if necessary
0381  * @bio:    bio to split
0382  * @sectors:    number of sectors to split from the front of @bio
0383  * @gfp:    gfp mask
0384  * @bs:     bio set to allocate from
0385  *
0386  * Return: a bio representing the next @sectors of @bio - if the bio is smaller
0387  * than @sectors, returns the original bio unchanged.
0388  */
0389 static inline struct bio *bio_next_split(struct bio *bio, int sectors,
0390                      gfp_t gfp, struct bio_set *bs)
0391 {
0392     if (sectors >= bio_sectors(bio))
0393         return bio;
0394 
0395     return bio_split(bio, sectors, gfp, bs);
0396 }
0397 
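/*
 * Usage sketch (not part of the upstream header): a stacking driver limited
 * to max_sectors per request can peel bios off the front with
 * bio_next_split() and chain the remainder. my_bio_set, max_sectors and
 * my_submit() are hypothetical.
 *
 *	while (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_next_split(bio, max_sectors,
 *						   GFP_NOIO, &my_bio_set);
 *
 *		bio_chain(split, bio);	// the remainder completes only after split does
 *		my_submit(split);
 *	}
 *	my_submit(bio);
 */
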
0398 enum {
0399     BIOSET_NEED_BVECS = BIT(0),
0400     BIOSET_NEED_RESCUER = BIT(1),
0401     BIOSET_PERCPU_CACHE = BIT(2),
0402 };
0403 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
0404 extern void bioset_exit(struct bio_set *);
0405 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
0406 
0407 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
0408                  blk_opf_t opf, gfp_t gfp_mask,
0409                  struct bio_set *bs);
0410 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
0411 extern void bio_put(struct bio *);
0412 
0413 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
0414         gfp_t gfp, struct bio_set *bs);
0415 int bio_init_clone(struct block_device *bdev, struct bio *bio,
0416         struct bio *bio_src, gfp_t gfp);
0417 
0418 extern struct bio_set fs_bio_set;
0419 
0420 static inline struct bio *bio_alloc(struct block_device *bdev,
0421         unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
0422 {
0423     return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
0424 }
0425 
0426 void submit_bio(struct bio *bio);
0427 
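/*
 * Usage sketch (not part of the upstream header): a one-page synchronous read
 * using the fs_bio_set backed allocator. bdev, page and sector are
 * hypothetical caller-provided values.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	int ret;
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);	// slot reserved by nr_vecs = 1
 *	ret = submit_bio_wait(bio);			// blocks until completion
 *	bio_put(bio);
 */
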
0428 extern void bio_endio(struct bio *);
0429 
0430 static inline void bio_io_error(struct bio *bio)
0431 {
0432     bio->bi_status = BLK_STS_IOERR;
0433     bio_endio(bio);
0434 }
0435 
0436 static inline void bio_wouldblock_error(struct bio *bio)
0437 {
0438     bio_set_flag(bio, BIO_QUIET);
0439     bio->bi_status = BLK_STS_AGAIN;
0440     bio_endio(bio);
0441 }
0442 
0443 /*
0444  * Calculate the number of bvec segments that should be allocated to fit the
0445  * data pointed to by @iter. If @iter is bvec-backed, its bvec array is
0446  * reused instead of allocating a new one.
0447  */
0448 static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
0449 {
0450     if (iov_iter_is_bvec(iter))
0451         return 0;
0452     return iov_iter_npages(iter, max_segs);
0453 }
0454 
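/*
 * Usage sketch (not part of the upstream header): sizing a bio for an
 * iov_iter, as a direct I/O path might do. For a bvec-backed iterator the
 * helper returns 0 and bio_iov_iter_get_pages() reuses the existing bvec
 * array instead of pinning pages. bdev and iter are hypothetical.
 *
 *	unsigned int nr_vecs = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, GFP_KERNEL);
 *	int ret = bio_iov_iter_get_pages(bio, iter);
 */
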
0455 struct request_queue;
0456 
0457 extern int submit_bio_wait(struct bio *bio);
0458 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
0459           unsigned short max_vecs, blk_opf_t opf);
0460 extern void bio_uninit(struct bio *);
0461 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
0462 void bio_chain(struct bio *, struct bio *);
0463 
0464 int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
0465 bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
0466 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
0467                unsigned int, unsigned int);
0468 int bio_add_zone_append_page(struct bio *bio, struct page *page,
0469                  unsigned int len, unsigned int offset);
0470 void __bio_add_page(struct bio *bio, struct page *page,
0471         unsigned int len, unsigned int off);
0472 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
0473 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
0474 void __bio_release_pages(struct bio *bio, bool mark_dirty);
0475 extern void bio_set_pages_dirty(struct bio *bio);
0476 extern void bio_check_pages_dirty(struct bio *bio);
0477 
0478 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
0479                    struct bio *src, struct bvec_iter *src_iter);
0480 extern void bio_copy_data(struct bio *dst, struct bio *src);
0481 extern void bio_free_pages(struct bio *bio);
0482 void guard_bio_eod(struct bio *bio);
0483 void zero_fill_bio(struct bio *bio);
0484 
0485 static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
0486 {
0487     if (!bio_flagged(bio, BIO_NO_PAGE_REF))
0488         __bio_release_pages(bio, mark_dirty);
0489 }
0490 
0491 #define bio_dev(bio) \
0492     disk_devt((bio)->bi_bdev->bd_disk)
0493 
0494 #ifdef CONFIG_BLK_CGROUP
0495 void bio_associate_blkg(struct bio *bio);
0496 void bio_associate_blkg_from_css(struct bio *bio,
0497                  struct cgroup_subsys_state *css);
0498 void bio_clone_blkg_association(struct bio *dst, struct bio *src);
0499 #else   /* CONFIG_BLK_CGROUP */
0500 static inline void bio_associate_blkg(struct bio *bio) { }
0501 static inline void bio_associate_blkg_from_css(struct bio *bio,
0502                            struct cgroup_subsys_state *css)
0503 { }
0504 static inline void bio_clone_blkg_association(struct bio *dst,
0505                           struct bio *src) { }
0506 #endif  /* CONFIG_BLK_CGROUP */
0507 
0508 static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
0509 {
0510     bio_clear_flag(bio, BIO_REMAPPED);
0511     if (bio->bi_bdev != bdev)
0512         bio_clear_flag(bio, BIO_THROTTLED);
0513     bio->bi_bdev = bdev;
0514     bio_associate_blkg(bio);
0515 }
0516 
0517 /*
0518  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
0519  *
0520  * A bio_list anchors a singly-linked list of bios chained through the bi_next
0521  * member of the bio.  The bio_list also caches the last list member to allow
0522  * fast access to the tail.
0523  */
0524 struct bio_list {
0525     struct bio *head;
0526     struct bio *tail;
0527 };
0528 
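/*
 * Usage sketch (not part of the upstream header): queueing deferred bios on a
 * private list and draining it later, e.g. from a worker. In real code the
 * list would live in a driver structure and be protected by a lock.
 *
 *	struct bio_list pending = BIO_EMPTY_LIST;
 *
 *	bio_list_add(&pending, bio);		// enqueue at the tail
 *
 *	while ((bio = bio_list_pop(&pending)))	// dequeue from the head
 *		submit_bio(bio);
 */
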
0529 static inline int bio_list_empty(const struct bio_list *bl)
0530 {
0531     return bl->head == NULL;
0532 }
0533 
0534 static inline void bio_list_init(struct bio_list *bl)
0535 {
0536     bl->head = bl->tail = NULL;
0537 }
0538 
0539 #define BIO_EMPTY_LIST  { NULL, NULL }
0540 
0541 #define bio_list_for_each(bio, bl) \
0542     for (bio = (bl)->head; bio; bio = bio->bi_next)
0543 
0544 static inline unsigned bio_list_size(const struct bio_list *bl)
0545 {
0546     unsigned sz = 0;
0547     struct bio *bio;
0548 
0549     bio_list_for_each(bio, bl)
0550         sz++;
0551 
0552     return sz;
0553 }
0554 
0555 static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
0556 {
0557     bio->bi_next = NULL;
0558 
0559     if (bl->tail)
0560         bl->tail->bi_next = bio;
0561     else
0562         bl->head = bio;
0563 
0564     bl->tail = bio;
0565 }
0566 
0567 static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
0568 {
0569     bio->bi_next = bl->head;
0570 
0571     bl->head = bio;
0572 
0573     if (!bl->tail)
0574         bl->tail = bio;
0575 }
0576 
0577 static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
0578 {
0579     if (!bl2->head)
0580         return;
0581 
0582     if (bl->tail)
0583         bl->tail->bi_next = bl2->head;
0584     else
0585         bl->head = bl2->head;
0586 
0587     bl->tail = bl2->tail;
0588 }
0589 
0590 static inline void bio_list_merge_head(struct bio_list *bl,
0591                        struct bio_list *bl2)
0592 {
0593     if (!bl2->head)
0594         return;
0595 
0596     if (bl->head)
0597         bl2->tail->bi_next = bl->head;
0598     else
0599         bl->tail = bl2->tail;
0600 
0601     bl->head = bl2->head;
0602 }
0603 
0604 static inline struct bio *bio_list_peek(struct bio_list *bl)
0605 {
0606     return bl->head;
0607 }
0608 
0609 static inline struct bio *bio_list_pop(struct bio_list *bl)
0610 {
0611     struct bio *bio = bl->head;
0612 
0613     if (bio) {
0614         bl->head = bl->head->bi_next;
0615         if (!bl->head)
0616             bl->tail = NULL;
0617 
0618         bio->bi_next = NULL;
0619     }
0620 
0621     return bio;
0622 }
0623 
0624 static inline struct bio *bio_list_get(struct bio_list *bl)
0625 {
0626     struct bio *bio = bl->head;
0627 
0628     bl->head = bl->tail = NULL;
0629 
0630     return bio;
0631 }
0632 
0633 /*
0634  * Increment chain count for the bio. Make sure the CHAIN flag update
0635  * is visible before the raised count.
0636  */
0637 static inline void bio_inc_remaining(struct bio *bio)
0638 {
0639     bio_set_flag(bio, BIO_CHAIN);
0640     smp_mb__before_atomic();
0641     atomic_inc(&bio->__bi_remaining);
0642 }
0643 
0644 /*
0645  * bio_set is used to allow other portions of the IO system to
0646  * allocate their own private memory pools for bio and iovec structures.
0647  * These memory pools in turn all allocate from the bio_slab
0648  * and the bvec_slabs[].
0649  */
0650 #define BIO_POOL_SIZE 2
0651 
0652 struct bio_set {
0653     struct kmem_cache *bio_slab;
0654     unsigned int front_pad;
0655 
0656     /*
0657      * per-cpu bio alloc cache
0658      */
0659     struct bio_alloc_cache __percpu *cache;
0660 
0661     mempool_t bio_pool;
0662     mempool_t bvec_pool;
0663 #if defined(CONFIG_BLK_DEV_INTEGRITY)
0664     mempool_t bio_integrity_pool;
0665     mempool_t bvec_integrity_pool;
0666 #endif
0667 
0668     unsigned int back_pad;
0669     /*
0670      * Deadlock avoidance for stacking block drivers: see comments in
0671      * bio_alloc_bioset() for details
0672      */
0673     spinlock_t      rescue_lock;
0674     struct bio_list     rescue_list;
0675     struct work_struct  rescue_work;
0676     struct workqueue_struct *rescue_workqueue;
0677 
0678     /*
0679      * Hot un-plug notifier for the per-cpu cache, if used
0680      */
0681     struct hlist_node cpuhp_dead;
0682 };
0683 
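/*
 * Usage sketch (not part of the upstream header): a driver keeping a private
 * bio_set so its allocations do not depend on fs_bio_set. my_bioset and the
 * pool size of 64 are hypothetical.
 *
 *	static struct bio_set my_bioset;
 *
 *	ret = bioset_init(&my_bioset, 64, 0, BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOIO, &my_bioset);
 *	...
 *	bioset_exit(&my_bioset);
 */
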
0684 static inline bool bioset_initialized(struct bio_set *bs)
0685 {
0686     return bs->bio_slab != NULL;
0687 }
0688 
0689 #if defined(CONFIG_BLK_DEV_INTEGRITY)
0690 
0691 #define bip_for_each_vec(bvl, bip, iter)                \
0692     for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
0693 
0694 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)           \
0695     for_each_bio(_bio)                      \
0696         bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
0697 
0698 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
0699 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
0700 extern bool bio_integrity_prep(struct bio *);
0701 extern void bio_integrity_advance(struct bio *, unsigned int);
0702 extern void bio_integrity_trim(struct bio *);
0703 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
0704 extern int bioset_integrity_create(struct bio_set *, int);
0705 extern void bioset_integrity_free(struct bio_set *);
0706 extern void bio_integrity_init(void);
0707 
0708 #else /* CONFIG_BLK_DEV_INTEGRITY */
0709 
0710 static inline void *bio_integrity(struct bio *bio)
0711 {
0712     return NULL;
0713 }
0714 
0715 static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
0716 {
0717     return 0;
0718 }
0719 
0720 static inline void bioset_integrity_free(struct bio_set *bs)
0721 {
0722     return;
0723 }
0724 
0725 static inline bool bio_integrity_prep(struct bio *bio)
0726 {
0727     return true;
0728 }
0729 
0730 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
0731                       gfp_t gfp_mask)
0732 {
0733     return 0;
0734 }
0735 
0736 static inline void bio_integrity_advance(struct bio *bio,
0737                      unsigned int bytes_done)
0738 {
0739     return;
0740 }
0741 
0742 static inline void bio_integrity_trim(struct bio *bio)
0743 {
0744     return;
0745 }
0746 
0747 static inline void bio_integrity_init(void)
0748 {
0749     return;
0750 }
0751 
0752 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
0753 {
0754     return false;
0755 }
0756 
0757 static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
0758                                 unsigned int nr)
0759 {
0760     return ERR_PTR(-EINVAL);
0761 }
0762 
0763 static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
0764                     unsigned int len, unsigned int offset)
0765 {
0766     return 0;
0767 }
0768 
0769 #endif /* CONFIG_BLK_DEV_INTEGRITY */
0770 
0771 /*
0772  * Mark a bio as polled. Note that for async polled IO, the caller must
0773  * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
0774  * We cannot block waiting for requests on polled IO, as those completions
0775  * must be found by the caller. This is different than IRQ driven IO, where
0776  * it's safe to wait for IO to complete.
0777  */
0778 static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
0779 {
0780     bio->bi_opf |= REQ_POLLED;
0781     if (!is_sync_kiocb(kiocb))
0782         bio->bi_opf |= REQ_NOWAIT;
0783 }
0784 
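/*
 * Usage sketch (not part of the upstream header): a direct I/O submission
 * path marking a bio as polled when the iocb asked for high-priority polling.
 * The caller must then reap completions itself (e.g. via bio_poll()) instead
 * of sleeping on them.
 *
 *	if (iocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, iocb);
 *	submit_bio(bio);
 */
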
0785 static inline void bio_clear_polled(struct bio *bio)
0786 {
0787     /* can't support alloc cache if we turn off polling */
0788     bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
0789 }
0790 
0791 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
0792         unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
0793 
0794 #endif /* __LINUX_BIO_H */