/*
 * blk-crypto-fallback: crypto API based fallback for inline encryption.
 *
 * Refer to Documentation/block/inline-encryption.rst for a detailed
 * explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
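        /*
         * Copy of the bvec_iter at the time the bio was submitted.  Only the
         * part of the bio described by this iter is decrypted, since the bio
         * may be split or advanced before it is resubmitted.
         */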
        struct bvec_iter crypt_iter;
        union {
                struct {
                        struct work_struct work;
                        struct bio *bio;
                };
                struct {
                        void *bi_private_orig;
                        bio_end_io_t *bi_end_io_orig;
                };
        };
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

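/*
 * Allocating a crypto tfm during I/O can deadlock, so all of a mode's tfms are
 * preallocated when that mode starts being used.  Since each mode may need all
 * of the keyslots at some point, each mode needs its own tfm for each keyslot;
 * thus, a keyslot may contain tfms for multiple modes.  However, for each
 * keyslot, only one tfm actually has a key configured in it at a time.
 */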
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

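/*
 * This is the key set when evicting a keyslot.  It would ideally be an all-zero
 * key, but AES-XTS rejects that key (with weak keys forbidden), so random bytes
 * are used instead.
 */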
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

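        /* Clear the key in the skcipher */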
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                     blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode =
                key->crypto_cfg.crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                blk_crypto_fallback_evict_keyslot(slot);

        slotp->crypto_mode = crypto_mode;
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                     key->size);
        if (err) {
                blk_crypto_fallback_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
                                             const struct blk_crypto_key *key,
                                             unsigned int slot)
{
        blk_crypto_fallback_evict_keyslot(slot);
        return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
        .keyslot_program = blk_crypto_fallback_keyslot_program,
        .keyslot_evict = blk_crypto_fallback_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        int i;

        for (i = 0; i < enc_bio->bi_vcnt; i++)
                mempool_free(enc_bio->bi_io_vec[i].bv_page,
                             blk_crypto_bounce_page_pool);

        src_bio->bi_status = enc_bio->bi_status;

        bio_uninit(enc_bio);
        kfree(enc_bio);
        bio_endio(src_bio);
}

static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
        unsigned int nr_segs = bio_segments(bio_src);
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        bio = bio_kmalloc(nr_segs, GFP_NOIO);
        if (!bio)
                return NULL;
        bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
                 bio_src->bi_opf);
        if (bio_flagged(bio_src, BIO_REMAPPED))
                bio_set_flag(bio, BIO_REMAPPED);
        bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

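        /* Copy the source bio's bvecs into the clone's bvec array. */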
        bio_for_each_segment(bv, bio_src, iter)
                bio->bi_io_vec[bio->bi_vcnt++] = bv;

        bio_clone_blkg_association(bio, bio_src);

        return bio;
}

static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
                                     struct skcipher_request **ciph_req_ret,
                                     struct crypto_wait *wait)
{
        struct skcipher_request *ciph_req;
        const struct blk_crypto_fallback_keyslot *slotp;
        int keyslot_idx = blk_crypto_keyslot_index(slot);

        slotp = &blk_crypto_keyslots[keyslot_idx];
        ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                          GFP_NOIO);
        if (!ciph_req)
                return false;

        skcipher_request_set_callback(ciph_req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, wait);
        *ciph_req_ret = ciph_req;

        return true;
}

static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        unsigned int i = 0;
        unsigned int num_sectors = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

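        /*
         * Count how many sectors fit in the first BIO_MAX_VECS segments, since
         * the bounce bio used for encryption can hold at most that many bvecs.
         */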
        bio_for_each_segment(bv, bio, iter) {
                num_sectors += bv.bv_len >> SECTOR_SHIFT;
                if (++i == BIO_MAX_VECS)
                        break;
        }
        if (num_sectors < bio_sectors(bio)) {
                struct bio *split_bio;

                split_bio = bio_split(bio, num_sectors, GFP_NOIO,
                                      &crypto_bio_split);
                if (!split_bio) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }
                bio_chain(split_bio, bio);
                submit_bio_noacct(bio);
                *bio_ptr = split_bio;
        }

        return true;
}

union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

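/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio.  May split the input bio if
 * it's too big.  Returns true on success.  Sets bio->bi_status and returns
 * false on error.
 */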
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
        struct bio *src_bio, *enc_bio;
        struct bio_crypt_ctx *bc;
        struct blk_crypto_keyslot *slot;
        int data_unit_size;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        struct scatterlist src, dst;
        union blk_crypto_iv iv;
        unsigned int i, j;
        bool ret = false;
        blk_status_t blk_st;

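        /* Split the bio if it's too big for single bio submission */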
        if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
                return false;

        src_bio = *bio_ptr;
        bc = src_bio->bi_crypt_context;
        data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

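        /* Allocate bounce bio for encryption */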
        enc_bio = blk_crypto_fallback_clone_bio(src_bio);
        if (!enc_bio) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return false;
        }

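        /*
         * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher
         * for this bio's algorithm and key.
         */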
        blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                src_bio->bi_status = blk_st;
                goto out_put_enc_bio;
        }

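        /* and initialize the cipher request for that keyslot */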
        if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                goto out_release_keyslot;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);

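        /* Encrypt each page in the bounce bio */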
        for (i = 0; i < enc_bio->bi_vcnt; i++) {
                struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
                struct page *plaintext_page = enc_bvec->bv_page;
                struct page *ciphertext_page =
                        mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

                enc_bvec->bv_page = ciphertext_page;

                if (!ciphertext_page) {
                        src_bio->bi_status = BLK_STS_RESOURCE;
                        goto out_free_bounce_pages;
                }

                sg_set_page(&src, plaintext_page, data_unit_size,
                            enc_bvec->bv_offset);
                sg_set_page(&dst, ciphertext_page, data_unit_size,
                            enc_bvec->bv_offset);

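                /* Encrypt each data unit in this page */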
                for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
                                            &wait)) {
                                i++;
                                src_bio->bi_status = BLK_STS_IOERR;
                                goto out_free_bounce_pages;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }
        }

        enc_bio->bi_private = src_bio;
        enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
        *bio_ptr = enc_bio;
        ret = true;

        enc_bio = NULL;
        goto out_free_ciph_req;

out_free_bounce_pages:
        while (i > 0)
                mempool_free(enc_bio->bi_io_vec[--i].bv_page,
                             blk_crypto_bounce_page_pool);
out_free_ciph_req:
        skcipher_request_free(ciph_req);
out_release_keyslot:
        blk_crypto_put_keyslot(slot);
out_put_enc_bio:
        if (enc_bio)
                bio_uninit(enc_bio);
        kfree(enc_bio);
        return ret;
}

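/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place, and calls bio_endio on the bio.
 */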
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(work, struct bio_fallback_crypt_ctx, work);
        struct bio *bio = f_ctx->bio;
        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
        struct blk_crypto_keyslot *slot;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_vec bv;
        struct bvec_iter iter;
        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        unsigned int i;
        blk_status_t blk_st;

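        /*
         * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher
         * for this bio's algorithm and key.
         */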
        blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                bio->bi_status = blk_st;
                goto out_no_keyslot;
        }

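        /* and initialize the cipher request for that keyslot */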
        if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

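        /* Decrypt each segment in the bio */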
        __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
                struct page *page = bv.bv_page;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

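                /* Decrypt each data unit in the segment */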
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
                                            &wait)) {
                                bio->bi_status = BLK_STS_IOERR;
                                goto out;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

out:
        skcipher_request_free(ciph_req);
        blk_crypto_put_keyslot(slot);
out_no_keyslot:
        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
        bio_endio(bio);
}

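/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function may be called from atomic context.
 */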
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

        bio->bi_private = f_ctx->bi_private_orig;
        bio->bi_end_io = f_ctx->bi_end_io_orig;

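        /* If there was an IO error, don't decrypt. */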
        if (bio->bi_status) {
                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
                bio_endio(bio);
                return;
        }

        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
        f_ctx->bio = bio;
        queue_work(blk_crypto_wq, &f_ctx->work);
}

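/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_fallback_split_bio_if_needed()), allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, this saves the bio's bio_crypt_ctx and hooks bi_end_io
 * so that the saved context is used to decrypt the bio's data after the
 * underlying device has finished the read; the original bi_end_io and
 * bi_private are restored before the original bi_end_io is called.
 *
 * In either case, the bio ends up looking like a regular bio (i.e. as if no
 * encryption context was ever specified) to the rest of the stack.
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */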
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
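                /* User didn't call blk_crypto_start_using_key() first */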
                bio->bi_status = BLK_STS_IOERR;
                return false;
        }

        if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
                                        &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                return false;
        }

        if (bio_data_dir(bio) == WRITE)
                return blk_crypto_fallback_encrypt_bio(bio_ptr);

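        /*
         * bio READ case: Set up a f_ctx in the bio's bi_private and set the
         * bi_end_io appropriately to trigger decryption when the bio is ended.
         */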
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        f_ctx->crypt_iter = bio->bi_iter;
        f_ctx->bi_private_orig = bio->bi_private;
        f_ctx->bi_end_io_orig = bio->bi_end_io;
        bio->bi_private = (void *)f_ctx;
        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
        bio_crypt_free_ctx(bio);

        return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
        int i;
        int err;
        struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;

        if (blk_crypto_fallback_inited)
                return 0;

        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

        err = bioset_init(&crypto_bio_split, 64, 0, 0);
        if (err)
                goto out;

        err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
        if (err)
                goto fail_free_bioset;
        err = -ENOMEM;

        profile->ll_ops = blk_crypto_fallback_ll_ops;
        profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

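        /* All blk-crypto modes have a crypto API fallback. */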
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                profile->modes_supported[i] = 0xFFFFFFFF;
        profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                goto fail_destroy_profile;

        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
                                      sizeof(blk_crypto_keyslots[0]),
                                      GFP_KERNEL);
        if (!blk_crypto_keyslots)
                goto fail_free_wq;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                goto fail_free_keyslots;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                goto fail_free_bounce_page_pool;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                goto fail_free_crypt_ctx_cache;

        blk_crypto_fallback_inited = true;

        return 0;
fail_free_crypt_ctx_cache:
        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
        mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
        kfree(blk_crypto_keyslots);
fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
        blk_crypto_profile_destroy(profile);
fail_free_bioset:
        bioset_exit(&crypto_bio_split);
out:
        return err;
}

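/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */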
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_fallback_keyslot *slotp;
        unsigned int i;
        int err = 0;

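        /*
         * Fast path: pair with the smp_store_release() below so that updates
         * to blk_crypto_keyslots[i].tfms[mode_num] for each i are visible
         * before they are used.
         */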
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (tfms_inited[mode_num])
                goto out;

        err = blk_crypto_fallback_init();
        if (err)
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_skcipher_set_flags(slotp->tfms[mode_num],
                                          CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        }

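        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before tfms_inited[mode_num] is set.
         */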
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}