/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS        (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK     (PAGE_SECTORS - 1)
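
/*
 * Illustrative example, not part of this header: converting between byte
 * counts and 512-byte sectors with the constants above. The helper names
 * are hypothetical.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
    return bytes >> SECTOR_SHIFT;           /* e.g. 4096 bytes -> 8 sectors */
}

static inline u64 example_sectors_to_bytes(sector_t nr_sects)
{
    return (u64)nr_sects << SECTOR_SHIFT;   /* e.g. 8 sectors -> 4096 bytes */
}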

struct block_device {
    sector_t        bd_start_sect;
    sector_t        bd_nr_sectors;
    struct disk_stats __percpu *bd_stats;
    unsigned long       bd_stamp;
    bool            bd_read_only;   /* read-only policy */
    dev_t           bd_dev;
    atomic_t        bd_openers;
    struct inode *      bd_inode;   /* will die */
    struct super_block *    bd_super;
    void *          bd_claiming;
    struct device       bd_device;
    void *          bd_holder;
    int         bd_holders;
    bool            bd_write_holder;
    struct kobject      *bd_holder_dir;
    u8          bd_partno;
    spinlock_t      bd_size_lock; /* for bd_inode->i_size updates */
    struct gendisk *    bd_disk;
    struct request_queue *  bd_queue;

    /* The counter of freeze processes */
    int         bd_fsfreeze_count;
    /* Mutex for freeze */
    struct mutex        bd_fsfreeze_mutex;
    struct super_block  *bd_fsfreeze_sb;

    struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
    bool            bd_make_it_fail;
#endif
} __randomize_layout;

#define bdev_whole(_bdev) \
    ((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
    container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
    (&((_bdev)->bd_device.kobj))

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP     ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT     ((__force blk_status_t)2)
#define BLK_STS_NOSPC       ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT   ((__force blk_status_t)4)
#define BLK_STS_TARGET      ((__force blk_status_t)5)
#define BLK_STS_NEXUS       ((__force blk_status_t)6)
#define BLK_STS_MEDIUM      ((__force blk_status_t)7)
#define BLK_STS_PROTECTION  ((__force blk_status_t)8)
#define BLK_STS_RESOURCE    ((__force blk_status_t)9)
#define BLK_STS_IOERR       ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN       ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails to allocate these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone, where a read to the
 * same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE   ((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE  ((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE    ((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE     ((__force blk_status_t)17)
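
/*
 * Illustrative example, not part of this header: a simplified errno ->
 * blk_status_t mapping of the kind the block core performs. The
 * authoritative table is blk_errors[] in block/blk-core.c; this sketch
 * only shows the idea and the function name is hypothetical.
 */
static inline blk_status_t example_errno_to_status(int err)
{
    switch (err) {
    case 0:            return BLK_STS_OK;
    case -EOPNOTSUPP:  return BLK_STS_NOTSUPP;
    case -ETIMEDOUT:   return BLK_STS_TIMEOUT;
    case -ENOSPC:      return BLK_STS_NOSPC;
    default:           return BLK_STS_IOERR;  /* catch-all I/O error */
    }
}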

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
    switch (error) {
    case BLK_STS_NOTSUPP:
    case BLK_STS_NOSPC:
    case BLK_STS_TARGET:
    case BLK_STS_NEXUS:
    case BLK_STS_MEDIUM:
    case BLK_STS_PROTECTION:
        return false;
    }

    /* Anything else could be a path failure, so should be retried */
    return true;
}
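
/*
 * Illustrative example, not part of this header: a failover-capable caller
 * (multipath drivers use blk_path_error() in a similar way) deciding
 * whether a failed request is worth retrying on another path. The function
 * name is hypothetical.
 */
static inline bool example_should_retry_on_other_path(blk_status_t error)
{
    /* Target, medium, protection etc. errors would fail on any path. */
    return error != BLK_STS_OK && blk_path_error(error);
}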

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
    (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
    u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
    return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
    return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
    return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                       sector_t size)
{
    size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
    issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
            (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
            ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
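
/*
 * Illustrative example, not part of this header: consuming the packed
 * bio_issue value, e.g. to compute how long a bio has been in flight for a
 * latency-based throttling policy. The helper name is hypothetical.
 */
static inline u64 example_bio_issue_age_ns(struct bio_issue *issue)
{
    /* Mask "now" the same way so the two 51-bit truncated values compare. */
    return __bio_issue_time(ktime_get_ns()) - bio_issue_time(issue);
}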

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE       -1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
    struct bio      *bi_next;   /* request queue link */
    struct block_device *bi_bdev;
    blk_opf_t       bi_opf;     /* bottom bits REQ_OP, top bits
                         * req_flags.
                         */
    unsigned short      bi_flags;   /* BIO_* below */
    unsigned short      bi_ioprio;
    blk_status_t        bi_status;
    atomic_t        __bi_remaining;

    struct bvec_iter    bi_iter;

    blk_qc_t        bi_cookie;
    bio_end_io_t        *bi_end_io;
    void            *bi_private;
#ifdef CONFIG_BLK_CGROUP
    /*
     * Represents the association of the css and request_queue for the bio.
     * If a bio goes direct to device, it will not have a blkg as it will
     * not have a request_queue associated with it.  The reference is put
     * on release of the bio.
     */
    struct blkcg_gq     *bi_blkg;
    struct bio_issue    bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
    u64         bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
    struct bio_crypt_ctx    *bi_crypt_context;
#endif

    union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
    };

    unsigned short      bi_vcnt;    /* how many bio_vec's */

    /*
     * Everything starting with bi_max_vecs will be preserved by bio_reset()
     */

    unsigned short      bi_max_vecs;    /* max bvl_vecs we can hold */

    atomic_t        __bi_cnt;   /* pin count */

    struct bio_vec      *bi_io_vec; /* the actual vec list */

    struct bio_set      *bi_pool;

    /*
     * We can inline a number of vecs at the end of the bio, to avoid
     * double allocations for a small number of bio_vecs. This member
     * MUST obviously be kept at the very end of the bio.
     */
    struct bio_vec      bi_inline_vecs[];
};

#define BIO_RESET_BYTES     offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS     (UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
    BIO_NO_PAGE_REF,    /* don't put the bio_vec pages on release */
    BIO_CLONED,     /* doesn't own data */
    BIO_BOUNCED,        /* bio is a bounce bio */
    BIO_WORKINGSET,     /* contains userspace workingset pages */
    BIO_QUIET,      /* Make BIO Quiet */
    BIO_CHAIN,      /* chained bio, ->bi_remaining in effect */
    BIO_REFFED,     /* bio has elevated ->bi_cnt */
    BIO_THROTTLED,      /* This bio has already been subjected to
                 * throttling rules. Don't do it again. */
    BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                 * of this bio. */
    BIO_CGROUP_ACCT,    /* has been accounted to a cgroup */
    BIO_QOS_THROTTLED,  /* bio went through rq_qos throttle path */
    BIO_QOS_MERGED,     /* but went through rq_qos merge path */
    BIO_REMAPPED,       /* start sector already adjusted for the partition offset */
    BIO_ZONE_WRITE_LOCKED,  /* Owns a zoned device zone write lock */
    BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS 8
#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data, the least significant bit has no
 * meaning.
 */
enum req_op {
    /* read sectors from the device */
    REQ_OP_READ     = (__force blk_opf_t)0,
    /* write sectors to the device */
    REQ_OP_WRITE        = (__force blk_opf_t)1,
    /* flush the volatile write cache */
    REQ_OP_FLUSH        = (__force blk_opf_t)2,
    /* discard sectors */
    REQ_OP_DISCARD      = (__force blk_opf_t)3,
    /* securely erase sectors */
    REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
    /* write the zero filled sector many times */
    REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
    /* Open a zone */
    REQ_OP_ZONE_OPEN    = (__force blk_opf_t)10,
    /* Close a zone */
    REQ_OP_ZONE_CLOSE   = (__force blk_opf_t)11,
    /* Transition a zone to full */
    REQ_OP_ZONE_FINISH  = (__force blk_opf_t)12,
    /* write data at the current zone write pointer */
    REQ_OP_ZONE_APPEND  = (__force blk_opf_t)13,
    /* reset a zone write pointer */
    REQ_OP_ZONE_RESET   = (__force blk_opf_t)15,
    /* reset all the zones present on the device */
    REQ_OP_ZONE_RESET_ALL   = (__force blk_opf_t)17,

    /* Driver private requests */
    REQ_OP_DRV_IN       = (__force blk_opf_t)34,
    REQ_OP_DRV_OUT      = (__force blk_opf_t)35,

    REQ_OP_LAST     = (__force blk_opf_t)36,
};

enum req_flag_bits {
    __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
        REQ_OP_BITS,
    __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
    __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
    __REQ_SYNC,     /* request is sync (sync write or read) */
    __REQ_META,     /* metadata io request */
    __REQ_PRIO,     /* boost priority in cfq */
    __REQ_NOMERGE,      /* don't touch this for merging */
    __REQ_IDLE,     /* anticipate more IO after this one */
    __REQ_INTEGRITY,    /* I/O includes block integrity payload */
    __REQ_FUA,      /* forced unit access */
    __REQ_PREFLUSH,     /* request for cache flush */
    __REQ_RAHEAD,       /* read ahead, can fail anytime */
    __REQ_BACKGROUND,   /* background IO */
    __REQ_NOWAIT,           /* Don't wait if request will block */
    /*
     * When a shared kthread needs to issue a bio for a cgroup, doing
     * so synchronously can lead to priority inversions as the kthread
     * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
     * submit_bio() punt the actual issuing to a dedicated per-blkcg
     * work item to avoid such priority inversions.
     */
    __REQ_CGROUP_PUNT,
    __REQ_POLLED,       /* caller polls for completion using bio_poll */
    __REQ_ALLOC_CACHE,  /* allocate IO from cache if available */
    __REQ_SWAP,     /* swap I/O */
    __REQ_DRV,      /* for driver use */

    /*
     * Command specific flags, keep last:
     */
    /* for REQ_OP_WRITE_ZEROES: */
    __REQ_NOUNMAP,      /* do not free blocks when zeroing */

    __REQ_NR_BITS,      /* stops here */
};

#define REQ_FAILFAST_DEV    \
            (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  \
            (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER \
            (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC    (__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META    (__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO    (__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE    (__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY   (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA     (__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH    (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD  (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND  (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT  (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT (__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_POLLED  (__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)

#define REQ_DRV     (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_SWAP    (__force blk_opf_t)(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
    (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
    (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
    STAT_READ,
    STAT_WRITE,
    STAT_DISCARD,
    STAT_FLUSH,

    NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
    return bio->bi_opf & REQ_OP_MASK;
}

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
                    blk_opf_t op_flags)
{
    bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(blk_opf_t op)
{
    return !!(op & (__force blk_opf_t)1);
}
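
/*
 * Illustrative example, not part of this header: composing a bi_opf value
 * (operation in the low REQ_OP_BITS bits, modifier flags above them) and
 * checking the data direction via the operation's least significant bit,
 * as documented for enum req_op. The function name is hypothetical.
 */
static inline bool example_opf_is_sync_write(void)
{
    blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;  /* data TO device */

    /* REQ_OP_WRITE (1) has the LSB set; REQ_OP_READ (0) does not. */
    return op_is_write(opf) && !op_is_write(REQ_OP_READ);
}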

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
    return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
    return (op & REQ_OP_MASK) == REQ_OP_READ ||
        (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
    return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
    switch (op & REQ_OP_MASK) {
    case REQ_OP_ZONE_RESET:
    case REQ_OP_ZONE_OPEN:
    case REQ_OP_ZONE_CLOSE:
    case REQ_OP_ZONE_FINISH:
        return true;
    default:
        return false;
    }
}

static inline int op_stat_group(enum req_op op)
{
    if (op_is_discard(op))
        return STAT_DISCARD;
    return op_is_write(op);
}
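
/*
 * Illustrative example, not part of this header: using op_stat_group() to
 * fold an operation into one of NR_STAT_GROUPS accounting buckets (reads
 * and writes land in STAT_READ/STAT_WRITE via op_is_write(); note that
 * op_stat_group() itself never returns STAT_FLUSH). The array and helper
 * are hypothetical.
 */
static inline void example_account_sectors(unsigned long counters[NR_STAT_GROUPS],
                                           enum req_op op, unsigned int sectors)
{
    counters[op_stat_group(op)] += sectors;
}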

struct blk_rq_stat {
    u64 mean;
    u64 min;
    u64 max;
    u32 nr_samples;
    u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */