#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/* number of latency buckets used for blk-mq poll statistics */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/*
 * GENHD_FL_REMOVABLE: the block device gives access to removable media.
 * GENHD_FL_HIDDEN: the disk is hidden; it does not show up in sysfs or /dev
 *	and cannot be opened from userspace.
 * GENHD_FL_NO_PART: partition support is disabled; the disk is not scanned
 *	for partitions.
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_dfl_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile *profile;
	unsigned char flags;
	unsigned char tuple_size;
	unsigned char interval_exp;
	unsigned char tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info *bdi;
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct kobject integrity_kobj;
#endif

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. These fields are set up by
	 * blk_revalidate_disk_zones() and are only valid for zoned devices.
	 */
	unsigned int nr_zones;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
	unsigned long *conv_zones_bitmap;
	unsigned long *seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/*
 * disk_openers - return how many openers there are for a disk, including all
 * of its partitions.  Only stable while disk->open_mutex is held.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}
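
/*
 * Illustrative sketch (not part of this header; 'example_set_blocksize' and
 * 'nbytes' are hypothetical): a driver validates a requested block size
 * before applying it with set_blocksize(), which is declared further below.
 *
 *	static int example_set_blocksize(struct block_device *bdev, int nbytes)
 *	{
 *		int ret = blk_validate_block_size(nbytes);
 *
 *		if (ret)	// rejects < 512, > PAGE_SIZE, non-power-of-two
 *			return ret;
 *		return set_blocksize(bdev, nbytes);
 *	}
 */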

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce bounce;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int logical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_secure_erase_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_zone_append_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int zone_write_granularity;

	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char raid_partial_stripes_expensive;
	enum blk_zoned_model zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
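
/*
 * Illustrative sketch (hedged; 'count_open_zones' and the locals are
 * hypothetical, and bdev is assumed to be a zoned block device):
 *
 *	static int count_open_zones(struct blk_zone *zone, unsigned int idx,
 *				    void *data)
 *	{
 *		unsigned int *open = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
 *		    zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *			(*open)++;
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	unsigned int open = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_open_zones, &open);
 *	// ret is the number of zones reported or a negative errno
 */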

/*
 * Independent access ranges: a range of contiguous sectors that can be
 * accessed using device resources independent from other ranges, e.g. the
 * actuators of a multi-actuator HDD.
 */
struct blk_independent_access_range {
	struct kobject kobj;
	sector_t sector;
	sector_t nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject kobj;
	bool sysfs_registered;
	unsigned int nr_ia_ranges;
	struct blk_independent_access_range ia_range[];
};

struct request_queue {
	struct request *last_merge;
	struct elevator_queue *elevator;

	struct percpu_ref q_usage_counter;

	struct blk_queue_stats *stats;
	struct rq_qos *rq_qos;

	const struct blk_mq_ops *mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;

	unsigned int queue_depth;

	/* hw dispatch queues */
	struct xarray hctx_table;
	unsigned int nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t pm_only;

	/*
	 * ida allocated id for this queue.
	 */
	int id;

	spinlock_t queue_lock;

	struct gendisk *disk;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif

#ifdef CONFIG_PM
	struct device *dev;
	enum rpm_status rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */

	unsigned int dma_pad_mask;
	/* DMA alignment mask applied to user buffers, see queue_dma_alignment() */
	unsigned int dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int rq_timeout;
	int poll_nsec;

	struct blk_stat_callback *poll_cb;
	struct blk_rq_stat *poll_stat;

	struct timer_list timeout;
	struct work_struct timeout_work;

	atomic_t nr_active_requests_shared_tags;

	struct blk_mq_tags *sched_shared_tags;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	unsigned int required_elevator_features;

	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu *blk_trace;
#endif

	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct delayed_work requeue_work;

	struct mutex sysfs_lock;
	struct mutex sysfs_dir_lock;

	/*
	 * for reusing dead hctx instances in case of updating nr_hw_queues
	 */
	struct list_head unused_hctx_list;
	spinlock_t unused_hctx_lock;

	int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;

	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex mq_freeze_lock;

	int quiesce_depth;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;

	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
	struct dentry *rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex debugfs_mutex;

	bool mq_sysfs_init_done;

	/*
	 * Sleepable RCU, used as a lock when the type of the request queue
	 * is blocking (BLK_MQ_F_BLOCKING); see also QUEUE_FLAG_HAS_SRCU.
	 */
	struct srcu_struct srcu[];
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_HAS_SRCU	2	/* SRCU is allocated */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merge */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_has_srcu(q)	test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/*
 * blk_alloc_disk - allocate a gendisk structure on @node_id
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
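
/*
 * Illustrative sketch (hedged; 'example_fops', the disk name and the
 * capacity are hypothetical values, not defined in this header):
 *
 *	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
 *	int ret;
 *
 *	if (!disk)
 *		return -ENOMEM;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "example0");
 *	disk->fops = &example_fops;
 *	set_capacity(disk, 1 << 21);	// 1 GiB expressed in 512-byte sectors
 *	ret = add_disk(disk);
 *	if (ret)
 *		put_disk(disk);
 *
 * Teardown later uses del_gendisk() followed by put_disk().
 */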

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
static inline int bd_register_pending_holders(struct gendisk *disk)
{
	return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion is found */
#define BLK_POLL_ONESHOT		(1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP		(1 << 1)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;
}

/* Helper to convert a BLK_ZONE_COND_* value to its string format */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
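
/*
 * Worked example for blk_chunk_sectors_left(): with chunk_sectors == 128 and
 * offset == 200, the containing chunk covers sectors 128..255, so
 * 200 & 127 == 72 and 128 - 72 == 56 sectors remain before the boundary.
 */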

/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
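
/*
 * Illustrative sketch (hedged; assumes a driver that already owns a
 * request_queue 'q', and the numbers are example values only):
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);	// 1 MiB per request
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_write_cache(q, true, true);	// volatile cache, FUA supported
 */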

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */

/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into single larger requests. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}
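
/*
 * Illustrative sketch of plugging (hedged; 'bios' and 'nr' are hypothetical
 * caller-owned values):
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);	// held back and merged where possible
 *	blk_finish_plug(&plug);		// issues the plugged requests
 */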

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
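
/*
 * Note on the shifts above: s_blocksize_bits - SECTOR_SHIFT converts
 * filesystem blocks to 512-byte sectors. For example, with 4096-byte blocks
 * (s_blocksize_bits == 12) the shift is 3, so block 10 becomes sector 80 and
 * one block spans 8 sectors.
 */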

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
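
/*
 * Example values for blksize_bits(): blksize_bits(512) == 9,
 * blksize_bits(1024) == 10, blksize_bits(4096) == 12, i.e. log2 of the block
 * size for power-of-two sizes (never less than 9).
 */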

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

#define NFL4_UFLG_MASK			0x0000003F

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing a GPT entry at a non-standard location.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
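
/*
 * Illustrative sketch (hedged; 'example_submit_bio' and 'example_ioctl' are
 * hypothetical driver functions): a simple bio-based driver typically fills
 * in at least .owner and .submit_bio and points gendisk->fops at the table
 * before add_disk():
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= example_submit_bio,
 *		.ioctl		= example_ioctl,
 *	};
 */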

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time);

void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev);

/*
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
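
/*
 * Illustrative sketch of the accounting pair above (hedged; assumes a
 * bio-based driver that keeps the returned start time until completion):
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...				// process the bio
 *	bio_end_io_acct(bio, start);	// charges the elapsed time to diskstats
 */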

int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
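
/*
 * Illustrative note (hedged sketch, not a definitive calling convention): a
 * polling caller passes an on-stack batch to bio_poll()/iocb_bio_iopoll() so
 * completions can be deferred into it, then runs the batch afterwards:
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	bio_poll(bio, &iob, 0);		// 'bio' is caller-provided
 *	if (iob.req_list)
 *		iob.complete(&iob);	// complete everything batched above
 */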

#endif /* _LINUX_BLKDEV_H */