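// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block device helpers.
 */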
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);
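
	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if it needs to be split.
	 */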
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;
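
	/*
	 * Align the bio size to the discard granularity to make splitting the
	 * bio at discard granularity boundaries easier in the driver if needed.
	 */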
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
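
	/* In case the discard granularity isn't set by a buggy device driver */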
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;
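
		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * isn't enabled.
		 */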
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
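
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */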
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;
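
	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */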
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
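
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */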
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev,
				   __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
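
/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */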
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
					  biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
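
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */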
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
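		/* No zeroing offload support */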
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
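			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */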
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;
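
	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */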
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);