/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

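/* Max future timer expiry for timeouts */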
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

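	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */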
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

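/*
 * Two adjacent bio_vecs can be merged into one physical segment only if they
 * are physically contiguous, acceptable to Xen when running in a Xen domain,
 * and do not straddle the queue's segment boundary mask.
 */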
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

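/*
 * Check whether adding a bio_vec after bprv with the given offset would create
 * a gap in the SG list for a queue that sets a virt_boundary_mask.
 */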
static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

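/*
 * There are two ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends them to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be a normal read/write request and the
 *     ranges need to be contiguous.
 */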
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

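/*
 * Plug flush limits
 */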
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

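/*
 * Internal elevator interface
 */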
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
					 struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		break;
	}

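	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE,
	 * so such bios never need splitting. This quick check relies on
	 * bi_io_vec[0] being valid whenever bi_vcnt is 1; a false positive
	 * here only means the more expensive split path runs for nothing.
	 */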
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

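/*
 * Contribute to I/O statistics only if statistics were enabled when the
 * request was started (RQF_IO_STAT) and the request is not a passthrough
 * command.
 */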
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

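/*
 * Internal io_context interface
 */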
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif

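/*
 * Request reference counting helpers. req_ref_put_and_test() returns true
 * when the last reference is dropped and the caller is then responsible for
 * freeing the request. The sanity check below flags a reference count that
 * is zero, has underflowed by up to 127, or is within 127 of wrapping past
 * UINT_MAX before a decrement is attempted.
 */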
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */