// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

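/**
 * blk_queue_rq_timeout - set the request timeout for the queue
 * @q:		the request queue for the device
 * @timeout:	timeout value, in jiffies
 */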
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

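/**
 * blk_set_default_limits - reset limits to default values
 * @lim:	the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */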
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->max_secure_erase_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce = BLK_BOUNCE_NONE;
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
	lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

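/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:	the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */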
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

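/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:		the request queue for the device
 * @bounce:	bounce limit to enforce
 */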
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

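/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests based on the capabilities
 *    of the I/O controller.  The block layer derives the soft
 *    max_sectors limit from it, capped at BLK_DEF_MAX_SECTORS and
 *    rounded down to the logical block size.
 */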
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

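/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks.
 */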
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

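/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 */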
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

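/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a single secure erase
 * @q:  the request queue for the device
 * @max_sectors:  maximum number of sectors to secure erase
 */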
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

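/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes operation
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors per command
 */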
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

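/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors per zone append command
 */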
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append
	 * value being 0 due to a 0 argument, the chunk_sectors limit
	 * (zone size) not set or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

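/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 */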
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

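/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 */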
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

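/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of a coalesced segment, in bytes
 */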
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

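/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 */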
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

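/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */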
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

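/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */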
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

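/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should use this function for devices whose first sector is not
 *   naturally aligned.
 */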
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

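/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */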
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

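/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 */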
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

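/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, e.g. the stripe
 *   width of a RAID array.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */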
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

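/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 */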
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);

static int queue_limit_alignment_offset(struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(struct queue_limits *lim,
		sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* The discard limits are stored in bytes; convert to sectors. */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of @sector within a discard granule */
	offset = sector_div(sector, granularity);

	/* Sectors from @sector to the next discard-aligned boundary */
	offset = (granularity + alignment - offset) % granularity;

	/* Convert back to bytes */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

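/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call this function for all
 *    component (bottom) devices.  The function combines the values and
 *    checks alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set.
 */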
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

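/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */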
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0)
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			disk->disk_name, bdev);

	disk_update_readahead(disk);
}
EXPORT_SYMBOL(disk_stack_limits);

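/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update the dma pad mask, but only if the new mask is bigger than the
 * current one.
 */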
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

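/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */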
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

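/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 */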
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

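/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 */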
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

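/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma
 *    transactions.  The alignment is only ever increased, so that
 *    multiple objects (driver, device, transport, etc.) can set their
 *    respective alignments without interfering with each other.
 */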
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

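/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */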
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

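/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */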
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);

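/**
 * blk_queue_required_elevator_features - set required elevator features
 * @q:		the request queue for the target device
 * @features:	required elevator features, OR'ed together
 *
 * Tell the block layer that only elevators implementing at least the set
 * of features specified by @features can be used with @q.
 */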
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

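/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */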
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

static bool disk_has_partitions(struct gendisk *disk)
{
	unsigned long idx;
	struct block_device *part;
	bool ret = false;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		if (bdev_is_partition(part)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

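/**
 * disk_set_zoned - configure the zoned model for a disk
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of @disk to @model.
 *
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of
 * partitions on the disk.
 */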
void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    disk_has_partitions(disk))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else {
		disk_clear_zone_settings(disk);
	}
}
EXPORT_SYMBOL_GPL(disk_set_zoned);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);