#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/blk-integrity.h>

#include "dm-core.h"

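/*
 * Walk an iterator backwards by @bytes through the bio_vec array @bv:
 * the inverse of advancing it.  Returns false (after resetting the
 * iterator to the start of the array) if asked to rewind past the
 * array's beginning.
 */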
static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv,
				       struct bvec_iter *iter,
				       unsigned int bytes)
{
	int idx;

	iter->bi_size += bytes;
	if (bytes <= iter->bi_bvec_done) {
		iter->bi_bvec_done -= bytes;
		return true;
	}

	bytes -= iter->bi_bvec_done;
	idx = iter->bi_idx - 1;

	while (idx >= 0 && bytes && bytes > bv[idx].bv_len) {
		bytes -= bv[idx].bv_len;
		idx--;
	}

	if (WARN_ONCE(idx < 0 && bytes,
		      "Attempted to rewind iter beyond bvec's boundaries\n")) {
		iter->bi_size -= bytes;
		iter->bi_bvec_done = 0;
		iter->bi_idx = 0;
		return false;
	}

	iter->bi_idx = idx;
	iter->bi_bvec_done = bv[idx].bv_len - bytes;
	return true;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

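/**
 * dm_bio_integrity_rewind - Rewind integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes to rewind
 *
 * Calculate how many integrity bytes correspond to @bytes_done data
 * bytes and rewind the integrity iterator accordingly.
 */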
static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
	dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void dm_bio_integrity_rewind(struct bio *bio,
					    unsigned int bytes_done)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#if defined(CONFIG_BLK_INLINE_ENCRYPTION)

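/* Decrement the multi-word data unit number @dun by @dec, propagating borrow. */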
static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				       unsigned int dec)
{
	int i;

	for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		u64 prev = dun[i];

		dun[i] -= dec;
		if (dun[i] > prev)
			dec = 1;
		else
			dec = 0;
	}
}

static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	dm_bio_crypt_dun_decrement(bc->bc_dun,
				   bytes >> bc->bc_key->data_unit_size_bits);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
	return;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline void dm_bio_rewind_iter(const struct bio *bio,
				      struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector -= bytes >> 9;

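	/* Ops that never advance the bvec iter only need bi_size restored */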
	if (bio_no_advance_iter(bio))
		iter->bi_size += bytes;
	else
		dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}

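/**
 * dm_bio_rewind - rewind @bio's iterator and associated state by @bytes
 * @bio:	bio to rewind
 * @bytes:	number of data bytes to rewind
 *
 * Undo @bytes of iterator advancement on @bio, keeping any integrity
 * payload and inline-encryption context in step with the data iterator.
 */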
static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
	if (bio_integrity(bio))
		dm_bio_integrity_rewind(bio, bytes);

	if (bio_has_crypt_ctx(bio))
		dm_bio_crypt_rewind(bio, bytes);

	dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs)
{
	struct bio *orig = io->orig_bio;
	struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
					       GFP_NOIO, bs);

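	/*
	 * The clone starts out identical to the partially processed
	 * original; rewind it back to this io's starting position and
	 * trim it to the io->sectors that must be reissued.
	 */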
	dm_bio_rewind(new_orig, ((io->sector_offset << 9) -
				 orig->bi_iter.bi_size));
	bio_trim(new_orig, 0, io->sectors);

	bio_chain(new_orig, orig);
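	/*
	 * bio_chain() bumped orig->__bi_remaining; drop that extra
	 * count here, since the dm splitting path already holds its
	 * own reference on orig for this dm_io.
	 */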
	atomic_dec(&orig->__bi_remaining);
	io->orig_bio = new_orig;
}