// SPDX-License-Identifier: GPL-2.0
/*
 * Block layer inline encryption (blk-crypto) core.
 *
 * Refer to Documentation/block/inline-encryption.rst for a detailed
 * explanation.
 */
#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * Number of bio crypt contexts preallocated in a mempool, so that
 * bio_crypt_set_ctx() can always make forward progress even when regular
 * allocations fail.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* The code assumes that BLK_ENCRYPTION_MODE_INVALID is 0. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity-check that no mode exceeds the declared size limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() below can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
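
/*
 * Example (illustrative sketch only, not part of this file): a submitter that
 * has already prepared a struct blk_crypto_key could attach an encryption
 * context to a bio it is about to submit roughly like this.  The names
 * "inode_key" and "first_data_unit" are hypothetical.
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_data_unit };
 *
 *	bio_crypt_set_ctx(bio, inode_key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *
 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the context allocation above
 * cannot fail.
 */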

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increment @dun by @inc, treating @dun as a multi-word little-endian integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this word overflowed, then we need to
		 * carry 1 into the next word.  Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
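
/*
 * For example, with BLK_CRYPTO_DUN_ARRAY_SIZE >= 2, incrementing
 * dun = { 0xFFFFFFFFFFFFFFFF, 0 } by 1 overflows word 0, so a carry of 1 is
 * added to word 1, giving dun = { 0, 1 }.
 */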

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Return true if @next_dun is the DUN that immediately follows the @bytes of
 * data covered by @bc, i.e. @bc->bc_dun plus @bytes converted to data units
 * equals @next_dun, treating the DUNs as multi-word integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;

		/*
		 * If the addition in this word overflowed, then we need to
		 * carry 1 into the next word.  Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, the ranges aren't contiguous. */
	return carry == 0;
}

/*
 * Check that two bio crypt contexts are compatible, i.e. that they use the
 * same key (or are both unencrypted); DUN continuity is checked separately.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Check that two bio crypt contexts are compatible and that their DUNs are
 * contiguous (and hence can be merged) in the order @bc1 followed by @bc2,
 * where @bc1 covers @bc1_bytes of data.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
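
/*
 * For example, if two bios use the same key with 4096-byte data units, a bio
 * whose context starts at DUN 100 and covers 8 data units (32768 bytes) can
 * be merged with a following bio whose context starts at DUN 108.
 */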

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Releases the keyslot that was acquired for the request, frees rq->crypt_ctx
 * back to the mempool, and resets the request's crypto fields to their
 * defaults.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare a bio that has an encryption context for
 *			   submission
 * @bio_ptr: pointer to the original bio pointer
 *
 * If the bio's encryption context is supported by the inline encryption
 * hardware of the device the bio is destined for, do nothing.  Otherwise, try
 * to handle the bio via blk-crypto-fallback, which may replace *bio_ptr with
 * a bounce bio.
 *
 * The caller must ensure the bio has a bio_crypt_ctx.
 *
 * Return: true on success; false on error, in which case bio->bi_status has
 *	   been set and bio_endio() has already been called, so submission of
 *	   the bio must be aborted.
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	struct blk_crypto_profile *profile;

	/* Error out if the bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if the bio's crypto context is supported by the device's
	 * inline encryption hardware, or if blk-crypto-fallback can handle it.
	 */
	profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
	if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.  Must be the correct length for the
 *	     chosen @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
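
/*
 * Example (illustrative sketch only): initializing a key for AES-256-XTS with
 * 64-bit DUNs and 4096-byte data units.  "raw" is a hypothetical 64-byte
 * buffer holding the raw key material.
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
 *	if (err)
 *		return err;
 *
 * dun_bytes (8 here) must be nonzero and no larger than the mode's ivsize
 * (16 for AES-256-XTS), and the data unit size must be a power of two.
 */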

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto, i.e. either the
 * request_queue's inline encryption hardware supports the config, or
 * blk-crypto-fallback is enabled and can handle it.
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or blk-crypto-fallback is prepared to
 * handle the key's mode.  This may allocate resources for the fallback, so it
 * should be called before I/O using the key is submitted.
 *
 * Return: 0 on success; -errno if neither the hardware nor the fallback can
 *	   use the key.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers must call this function to ensure that the key is evicted from
 * any hardware that it might have been programmed into.  The key must not be
 * in use by any in-flight I/O when this function is called.
 *
 * Return: 0 on success; -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return __blk_crypto_evict_key(q->crypto_profile, key);

	/*
	 * If the request_queue didn't support the key, then blk-crypto-fallback
	 * may have been used, so try to evict the key from blk-crypto-fallback
	 * too.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
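
/*
 * Typical key lifecycle (illustrative sketch only; "q" is the request_queue
 * of the target device and "blk_key" a key prepared with
 * blk_crypto_init_key()):
 *
 *	err = blk_crypto_start_using_key(&blk_key, q);
 *	if (err)
 *		return err;
 *
 *	... submit bios carrying the key via bio_crypt_set_ctx() ...
 *
 *	err = blk_crypto_evict_key(q, &blk_key);
 *
 * The key must not be in use by any in-flight I/O when it is evicted.
 */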