/*
 * dm-crypt: transparent encryption of block devices for the device-mapper,
 * built on the kernel crypto API.
 *
 * This file is released under the GPL.
 */
0010 #include <linux/completion.h>
0011 #include <linux/err.h>
0012 #include <linux/module.h>
0013 #include <linux/init.h>
0014 #include <linux/kernel.h>
0015 #include <linux/key.h>
0016 #include <linux/bio.h>
0017 #include <linux/blkdev.h>
0018 #include <linux/blk-integrity.h>
0019 #include <linux/mempool.h>
0020 #include <linux/slab.h>
0021 #include <linux/crypto.h>
0022 #include <linux/workqueue.h>
0023 #include <linux/kthread.h>
0024 #include <linux/backing-dev.h>
0025 #include <linux/atomic.h>
0026 #include <linux/scatterlist.h>
0027 #include <linux/rbtree.h>
0028 #include <linux/ctype.h>
0029 #include <asm/page.h>
0030 #include <asm/unaligned.h>
0031 #include <crypto/hash.h>
0032 #include <crypto/md5.h>
0033 #include <crypto/algapi.h>
0034 #include <crypto/skcipher.h>
0035 #include <crypto/aead.h>
0036 #include <crypto/authenc.h>
0037 #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
0038 #include <linux/key-type.h>
0039 #include <keys/user-type.h>
0040 #include <keys/encrypted-type.h>
0041 #include <keys/trusted-type.h>
0042
0043 #include <linux/device-mapper.h>
0044
0045 #include "dm-audit.h"
0046
0047 #define DM_MSG_PREFIX "crypt"
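/*
 * Illustrative mapping table line (see
 * Documentation/admin-guide/device-mapper/dm-crypt.rst for the
 * authoritative format):
 *   <cipher> <key> <iv_offset> <device path> <offset> [<#opt_params> <opt_params>]
 * e.g. "aes-xts-plain64 <hex key> 0 /dev/sdb 0 1 allow_discards"
 */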
0048
0049
0050
0051
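/*
 * Context holding the state of a single bio conversion (encryption or
 * decryption), which may be split across many asynchronous crypto requests.
 */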
0052 struct convert_context {
0053 struct completion restart;
0054 struct bio *bio_in;
0055 struct bio *bio_out;
0056 struct bvec_iter iter_in;
0057 struct bvec_iter iter_out;
0058 u64 cc_sector;
0059 atomic_t cc_pending;
0060 union {
0061 struct skcipher_request *req;
0062 struct aead_request *req_aead;
0063 } r;
0064
0065 };
0066
0067
0068
0069
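/*
 * Per-bio private data attached to each incoming bio.
 */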
0070 struct dm_crypt_io {
0071 struct crypt_config *cc;
0072 struct bio *base_bio;
0073 u8 *integrity_metadata;
0074 bool integrity_metadata_from_pool;
0075 struct work_struct work;
0076 struct tasklet_struct tasklet;
0077
0078 struct convert_context ctx;
0079
0080 atomic_t io_pending;
0081 blk_status_t error;
0082 sector_t sector;
0083
0084 struct rb_node rb_node;
0085 } CRYPTO_MINALIGN_ATTR;
0086
0087 struct dm_crypt_request {
0088 struct convert_context *ctx;
0089 struct scatterlist sg_in[4];
0090 struct scatterlist sg_out[4];
0091 u64 iv_sector;
0092 };
0093
0094 struct crypt_config;
0095
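/*
 * Operations for the different IV generation modes.  ctr/dtr run at target
 * construction/destruction, init/wipe handle IV key material, generator
 * produces the IV for one request, and post optionally post-processes the
 * data after the cipher operation.
 */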
0096 struct crypt_iv_operations {
0097 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
0098 const char *opts);
0099 void (*dtr)(struct crypt_config *cc);
0100 int (*init)(struct crypt_config *cc);
0101 int (*wipe)(struct crypt_config *cc);
0102 int (*generator)(struct crypt_config *cc, u8 *iv,
0103 struct dm_crypt_request *dmreq);
0104 int (*post)(struct crypt_config *cc, u8 *iv,
0105 struct dm_crypt_request *dmreq);
0106 };
0107
0108 struct iv_benbi_private {
0109 int shift;
0110 };
0111
0112 #define LMK_SEED_SIZE 64
0113 struct iv_lmk_private {
0114 struct crypto_shash *hash_tfm;
0115 u8 *seed;
0116 };
0117
0118 #define TCW_WHITENING_SIZE 16
0119 struct iv_tcw_private {
0120 struct crypto_shash *crc32_tfm;
0121 u8 *iv_seed;
0122 u8 *whitening;
0123 };
0124
0125 #define ELEPHANT_MAX_KEY_SIZE 32
0126 struct iv_elephant_private {
0127 struct crypto_skcipher *tfm;
0128 };
0129
0130
0131
0132
0133
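/* State and option flags of one crypt target instance. */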
0134 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
0135 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
0136 DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
0137 DM_CRYPT_WRITE_INLINE };
0138
0139 enum cipher_flags {
0140 CRYPT_MODE_INTEGRITY_AEAD,
0141 CRYPT_IV_LARGE_SECTORS,
0142 CRYPT_ENCRYPT_PREPROCESS,
0143 };
0144
0145
0146
0147
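/*
 * Private data of a crypt target: the underlying device, work queues,
 * mempools, cipher transforms and key material.
 */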
0148 struct crypt_config {
0149 struct dm_dev *dev;
0150 sector_t start;
0151
0152 struct percpu_counter n_allocated_pages;
0153
0154 struct workqueue_struct *io_queue;
0155 struct workqueue_struct *crypt_queue;
0156
0157 spinlock_t write_thread_lock;
0158 struct task_struct *write_thread;
0159 struct rb_root write_tree;
0160
0161 char *cipher_string;
0162 char *cipher_auth;
0163 char *key_string;
0164
0165 const struct crypt_iv_operations *iv_gen_ops;
0166 union {
0167 struct iv_benbi_private benbi;
0168 struct iv_lmk_private lmk;
0169 struct iv_tcw_private tcw;
0170 struct iv_elephant_private elephant;
0171 } iv_gen_private;
0172 u64 iv_offset;
0173 unsigned int iv_size;
0174 unsigned short int sector_size;
0175 unsigned char sector_shift;
0176
0177 union {
0178 struct crypto_skcipher **tfms;
0179 struct crypto_aead **tfms_aead;
0180 } cipher_tfm;
0181 unsigned tfms_count;
0182 unsigned long cipher_flags;
0183
	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request (or struct aead_request)
	 *      transform context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *   original IV
	 *   original sector number (__le64)
	 *   tag offset (unsigned int)
	 *
	 * dmreq_start is the offset of struct dm_crypt_request; the padding
	 * keeps dm_crypt_request and the IV correctly aligned.
	 */
0197 unsigned int dmreq_start;
0198
0199 unsigned int per_bio_data_size;
0200
0201 unsigned long flags;
0202 unsigned int key_size;
0203 unsigned int key_parts;
0204 unsigned int key_extra_size;
0205 unsigned int key_mac_size;
0206
0207 unsigned int integrity_tag_size;
0208 unsigned int integrity_iv_size;
0209 unsigned int on_disk_tag_size;

	/*
	 * Mempools and bio_set used to allocate per-bio data, crypto
	 * requests, page buffers and integrity tags without deadlocking
	 * under memory pressure.
	 */
0215 unsigned tag_pool_max_sectors;
0216 mempool_t tag_pool;
0217 mempool_t req_pool;
0218 mempool_t page_pool;
0219
0220 struct bio_set bs;
0221 struct mutex bio_alloc_lock;
0222
0223 u8 *authenc_key;
0224 u8 key[];
0225 };
0226
0227 #define MIN_IOS 64
0228 #define MAX_TAG_SIZE 480
0229 #define POOL_ENTRY_SIZE 512
0230
0231 static DEFINE_SPINLOCK(dm_crypt_clients_lock);
0232 static unsigned dm_crypt_clients_n = 0;
0233 static volatile unsigned long dm_crypt_pages_per_client;
0234 #define DM_CRYPT_MEMORY_PERCENT 2
0235 #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
0236
0237 static void crypt_endio(struct bio *clone);
0238 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
0239 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
0240 struct scatterlist *sg);
0241
0242 static bool crypt_integrity_aead(struct crypt_config *cc);
0243
0244
0245
0246
0247 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
0248 {
0249 return cc->cipher_tfm.tfms[0];
0250 }
0251
0252 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
0253 {
0254 return cc->cipher_tfm.tfms_aead[0];
0255 }
0256
/*
 * Different IV generation algorithms implemented below:
 *
 * plain:      32-bit little-endian sector number, zero padded.
 * plain64:    64-bit little-endian sector number, zero padded.
 * plain64be:  64-bit big-endian sector number, zero padded.
 * essiv:      "encrypted sector|salt IV"; with current kernels the actual
 *             encryption step is performed by the essiv crypto template, so
 *             the generator here only emits the plain64 sector number.
 * benbi:      64-bit big-endian narrow-block count starting at 1
 *             (used e.g. with LRW).
 * null:       all-zero IV (compatibility with old schemes only).
 * lmk:        loop-AES compatible mode; the IV is an MD5 hash of an optional
 *             seed, the remaining sector data and the sector number, and the
 *             first block is additionally tweaked after decryption.
 * tcw:        TrueCrypt (< 4.1) compatible CBC mode with per-sector
 *             whitening derived from the key and sector number via CRC32.
 * random:     random IV, usable only for AEAD modes where the IV is stored
 *             in the per-sector integrity metadata.
 * eboiv:      encrypted byte-offset IV (BitLocker CBC compatibility); the
 *             byte offset of the sector is encrypted with the bulk cipher.
 * elephant:   eboiv plus the BitLocker Elephant diffuser applied to the
 *             sector data (see crypt_iv_elephant()).
 *
 * Example cipher specifications selecting an IV mode:
 *   "aes-xts-plain64", "aes-cbc-essiv:sha256", "aes-cbc-eboiv".
 */
0313 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
0314 struct dm_crypt_request *dmreq)
0315 {
0316 memset(iv, 0, cc->iv_size);
0317 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
0318
0319 return 0;
0320 }
0321
0322 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
0323 struct dm_crypt_request *dmreq)
0324 {
0325 memset(iv, 0, cc->iv_size);
0326 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
0327
0328 return 0;
0329 }
0330
0331 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
0332 struct dm_crypt_request *dmreq)
0333 {
0334 memset(iv, 0, cc->iv_size);
0335
0336 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
0337
0338 return 0;
0339 }
0340
0341 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
0342 struct dm_crypt_request *dmreq)
0343 {
0344
0345
0346
0347
0348 memset(iv, 0, cc->iv_size);
0349 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
0350
0351 return 0;
0352 }
0353
0354 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
0355 const char *opts)
0356 {
0357 unsigned bs;
0358 int log;
0359
0360 if (crypt_integrity_aead(cc))
0361 bs = crypto_aead_blocksize(any_tfm_aead(cc));
0362 else
0363 bs = crypto_skcipher_blocksize(any_tfm(cc));
0364 log = ilog2(bs);
0365
0366
0367
0368
0369 if (1 << log != bs) {
0370 ti->error = "cypher blocksize is not a power of 2";
0371 return -EINVAL;
0372 }
0373
0374 if (log > 9) {
0375 ti->error = "cypher blocksize is > 512";
0376 return -EINVAL;
0377 }
0378
0379 cc->iv_gen_private.benbi.shift = 9 - log;
0380
0381 return 0;
0382 }
0383
0384 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
0385 {
0386 }
0387
0388 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
0389 struct dm_crypt_request *dmreq)
0390 {
0391 __be64 val;
0392
0393 memset(iv, 0, cc->iv_size - sizeof(u64));
0394
0395 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
0396 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
0397
0398 return 0;
0399 }
0400
0401 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
0402 struct dm_crypt_request *dmreq)
0403 {
0404 memset(iv, 0, cc->iv_size);
0405
0406 return 0;
0407 }
0408
0409 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
0410 {
0411 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
0412
0413 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
0414 crypto_free_shash(lmk->hash_tfm);
0415 lmk->hash_tfm = NULL;
0416
0417 kfree_sensitive(lmk->seed);
0418 lmk->seed = NULL;
0419 }
0420
0421 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
0422 const char *opts)
0423 {
0424 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
0425
0426 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
0427 ti->error = "Unsupported sector size for LMK";
0428 return -EINVAL;
0429 }
0430
0431 lmk->hash_tfm = crypto_alloc_shash("md5", 0,
0432 CRYPTO_ALG_ALLOCATES_MEMORY);
0433 if (IS_ERR(lmk->hash_tfm)) {
0434 ti->error = "Error initializing LMK hash";
0435 return PTR_ERR(lmk->hash_tfm);
0436 }
0437
0438
0439 if (cc->key_parts == cc->tfms_count) {
0440 lmk->seed = NULL;
0441 return 0;
0442 }
0443
0444 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
0445 if (!lmk->seed) {
0446 crypt_iv_lmk_dtr(cc);
0447 ti->error = "Error kmallocing seed storage in LMK";
0448 return -ENOMEM;
0449 }
0450
0451 return 0;
0452 }
0453
0454 static int crypt_iv_lmk_init(struct crypt_config *cc)
0455 {
0456 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
0457 int subkey_size = cc->key_size / cc->key_parts;
0458
0459
0460 if (lmk->seed)
0461 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
0462 crypto_shash_digestsize(lmk->hash_tfm));
0463
0464 return 0;
0465 }
0466
0467 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
0468 {
0469 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
0470
0471 if (lmk->seed)
0472 memset(lmk->seed, 0, LMK_SEED_SIZE);
0473
0474 return 0;
0475 }
0476
0477 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
0478 struct dm_crypt_request *dmreq,
0479 u8 *data)
0480 {
0481 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
0482 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
0483 struct md5_state md5state;
0484 __le32 buf[4];
0485 int i, r;
0486
0487 desc->tfm = lmk->hash_tfm;
0488
0489 r = crypto_shash_init(desc);
0490 if (r)
0491 return r;
0492
0493 if (lmk->seed) {
0494 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
0495 if (r)
0496 return r;
0497 }
0498
0499
0500 r = crypto_shash_update(desc, data + 16, 16 * 31);
0501 if (r)
0502 return r;
0503
0504
0505 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
0506 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
0507 buf[2] = cpu_to_le32(4024);
0508 buf[3] = 0;
0509 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
0510 if (r)
0511 return r;
0512
0513
0514 r = crypto_shash_export(desc, &md5state);
0515 if (r)
0516 return r;
0517
0518 for (i = 0; i < MD5_HASH_WORDS; i++)
0519 __cpu_to_le32s(&md5state.hash[i]);
0520 memcpy(iv, &md5state.hash, cc->iv_size);
0521
0522 return 0;
0523 }
0524
0525 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
0526 struct dm_crypt_request *dmreq)
0527 {
0528 struct scatterlist *sg;
0529 u8 *src;
0530 int r = 0;
0531
0532 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
0533 sg = crypt_get_sg_data(cc, dmreq->sg_in);
0534 src = kmap_atomic(sg_page(sg));
0535 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
0536 kunmap_atomic(src);
0537 } else
0538 memset(iv, 0, cc->iv_size);
0539
0540 return r;
0541 }
0542
0543 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
0544 struct dm_crypt_request *dmreq)
0545 {
0546 struct scatterlist *sg;
0547 u8 *dst;
0548 int r;
0549
0550 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
0551 return 0;
0552
0553 sg = crypt_get_sg_data(cc, dmreq->sg_out);
0554 dst = kmap_atomic(sg_page(sg));
0555 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
0556
0557
0558 if (!r)
0559 crypto_xor(dst + sg->offset, iv, cc->iv_size);
0560
0561 kunmap_atomic(dst);
0562 return r;
0563 }
0564
0565 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
0566 {
0567 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0568
0569 kfree_sensitive(tcw->iv_seed);
0570 tcw->iv_seed = NULL;
0571 kfree_sensitive(tcw->whitening);
0572 tcw->whitening = NULL;
0573
0574 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
0575 crypto_free_shash(tcw->crc32_tfm);
0576 tcw->crc32_tfm = NULL;
0577 }
0578
0579 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
0580 const char *opts)
0581 {
0582 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0583
0584 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
0585 ti->error = "Unsupported sector size for TCW";
0586 return -EINVAL;
0587 }
0588
0589 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
0590 ti->error = "Wrong key size for TCW";
0591 return -EINVAL;
0592 }
0593
0594 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
0595 CRYPTO_ALG_ALLOCATES_MEMORY);
0596 if (IS_ERR(tcw->crc32_tfm)) {
0597 ti->error = "Error initializing CRC32 in TCW";
0598 return PTR_ERR(tcw->crc32_tfm);
0599 }
0600
0601 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
0602 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
0603 if (!tcw->iv_seed || !tcw->whitening) {
0604 crypt_iv_tcw_dtr(cc);
0605 ti->error = "Error allocating seed storage in TCW";
0606 return -ENOMEM;
0607 }
0608
0609 return 0;
0610 }
0611
0612 static int crypt_iv_tcw_init(struct crypt_config *cc)
0613 {
0614 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0615 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
0616
0617 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
0618 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
0619 TCW_WHITENING_SIZE);
0620
0621 return 0;
0622 }
0623
0624 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
0625 {
0626 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0627
0628 memset(tcw->iv_seed, 0, cc->iv_size);
0629 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
0630
0631 return 0;
0632 }
0633
0634 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
0635 struct dm_crypt_request *dmreq,
0636 u8 *data)
0637 {
0638 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0639 __le64 sector = cpu_to_le64(dmreq->iv_sector);
0640 u8 buf[TCW_WHITENING_SIZE];
0641 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
0642 int i, r;
0643
0644
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
0647
0648
0649 desc->tfm = tcw->crc32_tfm;
0650 for (i = 0; i < 4; i++) {
0651 r = crypto_shash_init(desc);
0652 if (r)
0653 goto out;
0654 r = crypto_shash_update(desc, &buf[i * 4], 4);
0655 if (r)
0656 goto out;
0657 r = crypto_shash_final(desc, &buf[i * 4]);
0658 if (r)
0659 goto out;
0660 }
0661 crypto_xor(&buf[0], &buf[12], 4);
0662 crypto_xor(&buf[4], &buf[8], 4);
0663
0664
0665 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
0666 crypto_xor(data + i * 8, buf, 8);
0667 out:
0668 memzero_explicit(buf, sizeof(buf));
0669 return r;
0670 }
0671
0672 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
0673 struct dm_crypt_request *dmreq)
0674 {
0675 struct scatterlist *sg;
0676 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
0677 __le64 sector = cpu_to_le64(dmreq->iv_sector);
0678 u8 *src;
0679 int r = 0;
0680
0681
0682 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
0683 sg = crypt_get_sg_data(cc, dmreq->sg_in);
0684 src = kmap_atomic(sg_page(sg));
0685 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
0686 kunmap_atomic(src);
0687 }
0688
0689
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);
0694
0695 return r;
0696 }
0697
0698 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
0699 struct dm_crypt_request *dmreq)
0700 {
0701 struct scatterlist *sg;
0702 u8 *dst;
0703 int r;
0704
0705 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
0706 return 0;
0707
0708
0709 sg = crypt_get_sg_data(cc, dmreq->sg_out);
0710 dst = kmap_atomic(sg_page(sg));
0711 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
0712 kunmap_atomic(dst);
0713
0714 return r;
0715 }
0716
0717 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
0718 struct dm_crypt_request *dmreq)
0719 {
0720
0721 get_random_bytes(iv, cc->iv_size);
0722 return 0;
0723 }
0724
0725 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
0726 const char *opts)
0727 {
0728 if (crypt_integrity_aead(cc)) {
0729 ti->error = "AEAD transforms not supported for EBOIV";
0730 return -EINVAL;
0731 }
0732
0733 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
0734 ti->error = "Block size of EBOIV cipher does "
0735 "not match IV size of block cipher";
0736 return -EINVAL;
0737 }
0738
0739 return 0;
0740 }
0741
0742 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
0743 struct dm_crypt_request *dmreq)
0744 {
0745 u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
0746 struct skcipher_request *req;
0747 struct scatterlist src, dst;
0748 DECLARE_CRYPTO_WAIT(wait);
0749 int err;
0750
0751 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
0752 if (!req)
0753 return -ENOMEM;
0754
0755 memset(buf, 0, cc->iv_size);
0756 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
0757
0758 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
0759 sg_init_one(&dst, iv, cc->iv_size);
0760 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
0761 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
0762 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
0763 skcipher_request_free(req);
0764
0765 return err;
0766 }
0767
0768 static void crypt_iv_elephant_dtr(struct crypt_config *cc)
0769 {
0770 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
0771
0772 crypto_free_skcipher(elephant->tfm);
0773 elephant->tfm = NULL;
0774 }
0775
0776 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
0777 const char *opts)
0778 {
0779 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
0780 int r;
0781
0782 elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
0783 CRYPTO_ALG_ALLOCATES_MEMORY);
0784 if (IS_ERR(elephant->tfm)) {
0785 r = PTR_ERR(elephant->tfm);
0786 elephant->tfm = NULL;
0787 return r;
0788 }
0789
0790 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
0791 if (r)
0792 crypt_iv_elephant_dtr(cc);
0793 return r;
0794 }
0795
0796 static void diffuser_disk_to_cpu(u32 *d, size_t n)
0797 {
0798 #ifndef __LITTLE_ENDIAN
0799 int i;
0800
0801 for (i = 0; i < n; i++)
0802 d[i] = le32_to_cpu((__le32)d[i]);
0803 #endif
0804 }
0805
0806 static void diffuser_cpu_to_disk(__le32 *d, size_t n)
0807 {
0808 #ifndef __LITTLE_ENDIAN
0809 int i;
0810
0811 for (i = 0; i < n; i++)
0812 d[i] = cpu_to_le32((u32)d[i]);
0813 #endif
0814 }
0815
0816 static void diffuser_a_decrypt(u32 *d, size_t n)
0817 {
0818 int i, i1, i2, i3;
0819
0820 for (i = 0; i < 5; i++) {
0821 i1 = 0;
0822 i2 = n - 2;
0823 i3 = n - 5;
0824
0825 while (i1 < (n - 1)) {
0826 d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
0827 i1++; i2++; i3++;
0828
0829 if (i3 >= n)
0830 i3 -= n;
0831
0832 d[i1] += d[i2] ^ d[i3];
0833 i1++; i2++; i3++;
0834
0835 if (i2 >= n)
0836 i2 -= n;
0837
0838 d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
0839 i1++; i2++; i3++;
0840
0841 d[i1] += d[i2] ^ d[i3];
0842 i1++; i2++; i3++;
0843 }
0844 }
0845 }
0846
0847 static void diffuser_a_encrypt(u32 *d, size_t n)
0848 {
0849 int i, i1, i2, i3;
0850
0851 for (i = 0; i < 5; i++) {
0852 i1 = n - 1;
0853 i2 = n - 2 - 1;
0854 i3 = n - 5 - 1;
0855
0856 while (i1 > 0) {
0857 d[i1] -= d[i2] ^ d[i3];
0858 i1--; i2--; i3--;
0859
0860 d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
0861 i1--; i2--; i3--;
0862
0863 if (i2 < 0)
0864 i2 += n;
0865
0866 d[i1] -= d[i2] ^ d[i3];
0867 i1--; i2--; i3--;
0868
0869 if (i3 < 0)
0870 i3 += n;
0871
0872 d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
0873 i1--; i2--; i3--;
0874 }
0875 }
0876 }
0877
0878 static void diffuser_b_decrypt(u32 *d, size_t n)
0879 {
0880 int i, i1, i2, i3;
0881
0882 for (i = 0; i < 3; i++) {
0883 i1 = 0;
0884 i2 = 2;
0885 i3 = 5;
0886
0887 while (i1 < (n - 1)) {
0888 d[i1] += d[i2] ^ d[i3];
0889 i1++; i2++; i3++;
0890
0891 d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
0892 i1++; i2++; i3++;
0893
0894 if (i2 >= n)
0895 i2 -= n;
0896
0897 d[i1] += d[i2] ^ d[i3];
0898 i1++; i2++; i3++;
0899
0900 if (i3 >= n)
0901 i3 -= n;
0902
0903 d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
0904 i1++; i2++; i3++;
0905 }
0906 }
0907 }
0908
0909 static void diffuser_b_encrypt(u32 *d, size_t n)
0910 {
0911 int i, i1, i2, i3;
0912
0913 for (i = 0; i < 3; i++) {
0914 i1 = n - 1;
0915 i2 = 2 - 1;
0916 i3 = 5 - 1;
0917
0918 while (i1 > 0) {
0919 d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
0920 i1--; i2--; i3--;
0921
0922 if (i3 < 0)
0923 i3 += n;
0924
0925 d[i1] -= d[i2] ^ d[i3];
0926 i1--; i2--; i3--;
0927
0928 if (i2 < 0)
0929 i2 += n;
0930
0931 d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
0932 i1--; i2--; i3--;
0933
0934 d[i1] -= d[i2] ^ d[i3];
0935 i1--; i2--; i3--;
0936 }
0937 }
0938 }
0939
0940 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
0941 {
0942 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
0943 u8 *es, *ks, *data, *data2, *data_offset;
0944 struct skcipher_request *req;
0945 struct scatterlist *sg, *sg2, src, dst;
0946 DECLARE_CRYPTO_WAIT(wait);
0947 int i, r;
0948
0949 req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
0950 es = kzalloc(16, GFP_NOIO);
0951 ks = kzalloc(32, GFP_NOIO);
0952
0953 if (!req || !es || !ks) {
0954 r = -ENOMEM;
0955 goto out;
0956 }
0957
0958 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
0959
0960
0961 sg_init_one(&src, es, 16);
0962 sg_init_one(&dst, ks, 16);
0963 skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
0964 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
0965 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
0966 if (r)
0967 goto out;
0968
0969
0970 es[15] = 0x80;
0971 sg_init_one(&dst, &ks[16], 16);
0972 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
0973 if (r)
0974 goto out;
0975
0976 sg = crypt_get_sg_data(cc, dmreq->sg_out);
0977 data = kmap_atomic(sg_page(sg));
0978 data_offset = data + sg->offset;
0979
0980
0981 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
0982 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
0983 data2 = kmap_atomic(sg_page(sg2));
0984 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
0985 kunmap_atomic(data2);
0986 }
0987
0988 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
0989 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
0990 diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
0991 diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
0992 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
0993 }
0994
0995 for (i = 0; i < (cc->sector_size / 32); i++)
0996 crypto_xor(data_offset + i * 32, ks, 32);
0997
0998 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
0999 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
1000 diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
1001 diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
1002 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
1003 }
1004
1005 kunmap_atomic(data);
1006 out:
1007 kfree_sensitive(ks);
1008 kfree_sensitive(es);
1009 skcipher_request_free(req);
1010 return r;
1011 }
1012
1013 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1014 struct dm_crypt_request *dmreq)
1015 {
1016 int r;
1017
1018 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1019 r = crypt_iv_elephant(cc, dmreq);
1020 if (r)
1021 return r;
1022 }
1023
1024 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1025 }
1026
1027 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1028 struct dm_crypt_request *dmreq)
1029 {
1030 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1031 return crypt_iv_elephant(cc, dmreq);
1032
1033 return 0;
1034 }
1035
1036 static int crypt_iv_elephant_init(struct crypt_config *cc)
1037 {
1038 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1039 int key_offset = cc->key_size - cc->key_extra_size;
1040
1041 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1042 }
1043
1044 static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1045 {
1046 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1047 u8 key[ELEPHANT_MAX_KEY_SIZE];
1048
1049 memset(key, 0, cc->key_extra_size);
1050 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1051 }
1052
1053 static const struct crypt_iv_operations crypt_iv_plain_ops = {
1054 .generator = crypt_iv_plain_gen
1055 };
1056
1057 static const struct crypt_iv_operations crypt_iv_plain64_ops = {
1058 .generator = crypt_iv_plain64_gen
1059 };
1060
1061 static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
1062 .generator = crypt_iv_plain64be_gen
1063 };
1064
1065 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
1066 .generator = crypt_iv_essiv_gen
1067 };
1068
1069 static const struct crypt_iv_operations crypt_iv_benbi_ops = {
1070 .ctr = crypt_iv_benbi_ctr,
1071 .dtr = crypt_iv_benbi_dtr,
1072 .generator = crypt_iv_benbi_gen
1073 };
1074
1075 static const struct crypt_iv_operations crypt_iv_null_ops = {
1076 .generator = crypt_iv_null_gen
1077 };
1078
1079 static const struct crypt_iv_operations crypt_iv_lmk_ops = {
1080 .ctr = crypt_iv_lmk_ctr,
1081 .dtr = crypt_iv_lmk_dtr,
1082 .init = crypt_iv_lmk_init,
1083 .wipe = crypt_iv_lmk_wipe,
1084 .generator = crypt_iv_lmk_gen,
1085 .post = crypt_iv_lmk_post
1086 };
1087
1088 static const struct crypt_iv_operations crypt_iv_tcw_ops = {
1089 .ctr = crypt_iv_tcw_ctr,
1090 .dtr = crypt_iv_tcw_dtr,
1091 .init = crypt_iv_tcw_init,
1092 .wipe = crypt_iv_tcw_wipe,
1093 .generator = crypt_iv_tcw_gen,
1094 .post = crypt_iv_tcw_post
1095 };
1096
1097 static const struct crypt_iv_operations crypt_iv_random_ops = {
1098 .generator = crypt_iv_random_gen
1099 };
1100
1101 static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
1102 .ctr = crypt_iv_eboiv_ctr,
1103 .generator = crypt_iv_eboiv_gen
1104 };
1105
1106 static const struct crypt_iv_operations crypt_iv_elephant_ops = {
1107 .ctr = crypt_iv_elephant_ctr,
1108 .dtr = crypt_iv_elephant_dtr,
1109 .init = crypt_iv_elephant_init,
1110 .wipe = crypt_iv_elephant_wipe,
1111 .generator = crypt_iv_elephant_gen,
1112 .post = crypt_iv_elephant_post
1113 };
1114
1115
1116
1117
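/* True if the configured cipher mode is an authenticated (AEAD) mode. */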
1118 static bool crypt_integrity_aead(struct crypt_config *cc)
1119 {
1120 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1121 }
1122
1123 static bool crypt_integrity_hmac(struct crypt_config *cc)
1124 {
1125 return crypt_integrity_aead(cc) && cc->key_mac_size;
1126 }
1127
1128
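/*
 * Return the scatterlist entry holding the sector data.  For AEAD requests
 * the layout is [sector | IV | data | tag], so the data lives at index 2;
 * plain skcipher requests use a single-entry scatterlist.
 */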
1129 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1130 struct scatterlist *sg)
1131 {
1132 if (unlikely(crypt_integrity_aead(cc)))
1133 return &sg[2];
1134
1135 return sg;
1136 }
1137
1138 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1139 {
1140 struct bio_integrity_payload *bip;
1141 unsigned int tag_len;
1142 int ret;
1143
1144 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1145 return 0;
1146
1147 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1148 if (IS_ERR(bip))
1149 return PTR_ERR(bip);
1150
1151 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1152
1153 bip->bip_iter.bi_size = tag_len;
1154 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1155
1156 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
1157 tag_len, offset_in_page(io->integrity_metadata));
1158 if (unlikely(ret != tag_len))
1159 return -ENOMEM;
1160
1161 return 0;
1162 }
1163
1164 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1165 {
1166 #ifdef CONFIG_BLK_DEV_INTEGRITY
1167 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1168 struct mapped_device *md = dm_table_get_md(ti->table);
1169
1170
1171 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
1172 ti->error = "Integrity profile not supported.";
1173 return -EINVAL;
1174 }
1175
1176 if (bi->tag_size != cc->on_disk_tag_size ||
1177 bi->tuple_size != cc->on_disk_tag_size) {
1178 ti->error = "Integrity profile tag size mismatch.";
1179 return -EINVAL;
1180 }
1181 if (1 << bi->interval_exp != cc->sector_size) {
1182 ti->error = "Integrity profile sector size mismatch.";
1183 return -EINVAL;
1184 }
1185
1186 if (crypt_integrity_aead(cc)) {
1187 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1188 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
1189 cc->integrity_tag_size, cc->integrity_iv_size);
1190
1191 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1192 ti->error = "Integrity AEAD auth tag size is not supported.";
1193 return -EINVAL;
1194 }
1195 } else if (cc->integrity_iv_size)
1196 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
1197 cc->integrity_iv_size);
1198
1199 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1200 ti->error = "Not enough space for integrity tag in the profile.";
1201 return -EINVAL;
1202 }
1203
1204 return 0;
1205 #else
1206 ti->error = "Integrity profile not supported.";
1207 return -EINVAL;
1208 #endif
1209 }
1210
1211 static void crypt_convert_init(struct crypt_config *cc,
1212 struct convert_context *ctx,
1213 struct bio *bio_out, struct bio *bio_in,
1214 sector_t sector)
1215 {
1216 ctx->bio_in = bio_in;
1217 ctx->bio_out = bio_out;
1218 if (bio_in)
1219 ctx->iter_in = bio_in->bi_iter;
1220 if (bio_out)
1221 ctx->iter_out = bio_out->bi_iter;
1222 ctx->cc_sector = sector + cc->iv_offset;
1223 init_completion(&ctx->restart);
1224 }
1225
1226 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1227 void *req)
1228 {
1229 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1230 }
1231
1232 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1233 {
1234 return (void *)((char *)dmreq - cc->dmreq_start);
1235 }
1236
1237 static u8 *iv_of_dmreq(struct crypt_config *cc,
1238 struct dm_crypt_request *dmreq)
1239 {
1240 if (crypt_integrity_aead(cc))
1241 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1242 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1243 else
1244 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1245 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1246 }
1247
1248 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1249 struct dm_crypt_request *dmreq)
1250 {
1251 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1252 }
1253
1254 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1255 struct dm_crypt_request *dmreq)
1256 {
1257 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1258 return (__le64 *) ptr;
1259 }
1260
1261 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1262 struct dm_crypt_request *dmreq)
1263 {
1264 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1265 cc->iv_size + sizeof(uint64_t);
1266 return (unsigned int*)ptr;
1267 }
1268
1269 static void *tag_from_dmreq(struct crypt_config *cc,
1270 struct dm_crypt_request *dmreq)
1271 {
1272 struct convert_context *ctx = dmreq->ctx;
1273 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1274
1275 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1276 cc->on_disk_tag_size];
1277 }
1278
1279 static void *iv_tag_from_dmreq(struct crypt_config *cc,
1280 struct dm_crypt_request *dmreq)
1281 {
1282 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1283 }
1284
1285 static int crypt_convert_block_aead(struct crypt_config *cc,
1286 struct convert_context *ctx,
1287 struct aead_request *req,
1288 unsigned int tag_offset)
1289 {
1290 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1291 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1292 struct dm_crypt_request *dmreq;
1293 u8 *iv, *org_iv, *tag_iv, *tag;
1294 __le64 *sector;
1295 int r = 0;
1296
1297 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1298
1299
1300 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1301 return -EIO;
1302
1303 dmreq = dmreq_of_req(cc, req);
1304 dmreq->iv_sector = ctx->cc_sector;
1305 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1306 dmreq->iv_sector >>= cc->sector_shift;
1307 dmreq->ctx = ctx;
1308
1309 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1310
1311 sector = org_sector_of_dmreq(cc, dmreq);
1312 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1313
1314 iv = iv_of_dmreq(cc, dmreq);
1315 org_iv = org_iv_of_dmreq(cc, dmreq);
1316 tag = tag_from_dmreq(cc, dmreq);
1317 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1318
1319
1320
1321
1322
1323
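	/*
	 * AEAD request layout:
	 * |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 * | (authenticated) | (auth+encryption) |              |
	 * | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */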
1324 sg_init_table(dmreq->sg_in, 4);
1325 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1326 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1327 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1328 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1329
1330 sg_init_table(dmreq->sg_out, 4);
1331 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1332 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1333 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1334 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1335
1336 if (cc->iv_gen_ops) {
1337
1338 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1339 memcpy(org_iv, tag_iv, cc->iv_size);
1340 } else {
1341 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1342 if (r < 0)
1343 return r;
1344
1345 if (cc->integrity_iv_size)
1346 memcpy(tag_iv, org_iv, cc->iv_size);
1347 }
1348
1349 memcpy(iv, org_iv, cc->iv_size);
1350 }
1351
1352 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1353 if (bio_data_dir(ctx->bio_in) == WRITE) {
1354 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1355 cc->sector_size, iv);
1356 r = crypto_aead_encrypt(req);
1357 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1358 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1359 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1360 } else {
1361 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1362 cc->sector_size + cc->integrity_tag_size, iv);
1363 r = crypto_aead_decrypt(req);
1364 }
1365
1366 if (r == -EBADMSG) {
1367 sector_t s = le64_to_cpu(*sector);
1368
1369 DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
1370 ctx->bio_in->bi_bdev, s);
1371 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
1372 ctx->bio_in, s, 0);
1373 }
1374
1375 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1376 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1377
1378 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1379 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1380
1381 return r;
1382 }
1383
1384 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1385 struct convert_context *ctx,
1386 struct skcipher_request *req,
1387 unsigned int tag_offset)
1388 {
1389 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1390 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1391 struct scatterlist *sg_in, *sg_out;
1392 struct dm_crypt_request *dmreq;
1393 u8 *iv, *org_iv, *tag_iv;
1394 __le64 *sector;
1395 int r = 0;
1396
1397
1398 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1399 return -EIO;
1400
1401 dmreq = dmreq_of_req(cc, req);
1402 dmreq->iv_sector = ctx->cc_sector;
1403 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1404 dmreq->iv_sector >>= cc->sector_shift;
1405 dmreq->ctx = ctx;
1406
1407 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1408
1409 iv = iv_of_dmreq(cc, dmreq);
1410 org_iv = org_iv_of_dmreq(cc, dmreq);
1411 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1412
1413 sector = org_sector_of_dmreq(cc, dmreq);
1414 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1415
1416
1417 sg_in = &dmreq->sg_in[0];
1418 sg_out = &dmreq->sg_out[0];
1419
1420 sg_init_table(sg_in, 1);
1421 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1422
1423 sg_init_table(sg_out, 1);
1424 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1425
1426 if (cc->iv_gen_ops) {
1427
1428 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1429 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1430 } else {
1431 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1432 if (r < 0)
1433 return r;
1434
1435 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1436 sg_in = sg_out;
1437
1438 if (cc->integrity_iv_size)
1439 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1440 }
1441
1442 memcpy(iv, org_iv, cc->iv_size);
1443 }
1444
1445 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1446
1447 if (bio_data_dir(ctx->bio_in) == WRITE)
1448 r = crypto_skcipher_encrypt(req);
1449 else
1450 r = crypto_skcipher_decrypt(req);
1451
1452 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1453 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1454
1455 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1456 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1457
1458 return r;
1459 }
1460
1461 static void kcryptd_async_done(struct crypto_async_request *async_req,
1462 int error);
1463
1464 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1465 struct convert_context *ctx)
1466 {
1467 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1468
1469 if (!ctx->r.req) {
1470 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1471 if (!ctx->r.req)
1472 return -ENOMEM;
1473 }
1474
1475 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1476
1477
1478
1479
1480
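	/*
	 * Use CRYPTO_TFM_REQ_MAY_BACKLOG so that a busy driver queues the
	 * request on its backlog instead of failing; completion is reported
	 * asynchronously through kcryptd_async_done().
	 */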
1481 skcipher_request_set_callback(ctx->r.req,
1482 CRYPTO_TFM_REQ_MAY_BACKLOG,
1483 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1484
1485 return 0;
1486 }
1487
1488 static int crypt_alloc_req_aead(struct crypt_config *cc,
1489 struct convert_context *ctx)
1490 {
1491 if (!ctx->r.req_aead) {
1492 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1493 if (!ctx->r.req_aead)
1494 return -ENOMEM;
1495 }
1496
1497 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1498
1499
1500
1501
1502
1503 aead_request_set_callback(ctx->r.req_aead,
1504 CRYPTO_TFM_REQ_MAY_BACKLOG,
1505 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1506
1507 return 0;
1508 }
1509
1510 static int crypt_alloc_req(struct crypt_config *cc,
1511 struct convert_context *ctx)
1512 {
1513 if (crypt_integrity_aead(cc))
1514 return crypt_alloc_req_aead(cc, ctx);
1515 else
1516 return crypt_alloc_req_skcipher(cc, ctx);
1517 }
1518
1519 static void crypt_free_req_skcipher(struct crypt_config *cc,
1520 struct skcipher_request *req, struct bio *base_bio)
1521 {
1522 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1523
1524 if ((struct skcipher_request *)(io + 1) != req)
1525 mempool_free(req, &cc->req_pool);
1526 }
1527
1528 static void crypt_free_req_aead(struct crypt_config *cc,
1529 struct aead_request *req, struct bio *base_bio)
1530 {
1531 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1532
1533 if ((struct aead_request *)(io + 1) != req)
1534 mempool_free(req, &cc->req_pool);
1535 }
1536
1537 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1538 {
1539 if (crypt_integrity_aead(cc))
1540 crypt_free_req_aead(cc, req, base_bio);
1541 else
1542 crypt_free_req_skcipher(cc, req, base_bio);
1543 }
1544
1545
1546
1547
1548 static blk_status_t crypt_convert(struct crypt_config *cc,
1549 struct convert_context *ctx, bool atomic, bool reset_pending)
1550 {
1551 unsigned int tag_offset = 0;
1552 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1553 int r;
1554
1555
1556
1557
1558
1559
1560 if (reset_pending)
1561 atomic_set(&ctx->cc_pending, 1);
1562
1563 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1564
1565 r = crypt_alloc_req(cc, ctx);
1566 if (r) {
1567 complete(&ctx->restart);
1568 return BLK_STS_DEV_RESOURCE;
1569 }
1570
1571 atomic_inc(&ctx->cc_pending);
1572
1573 if (crypt_integrity_aead(cc))
1574 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1575 else
1576 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1577
1578 switch (r) {
1579
1580
1581
1582
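	/*
	 * The request was queued by a crypto driver
	 * but the driver request queue is full, let's wait.
	 */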
1583 case -EBUSY:
1584 if (in_interrupt()) {
1585 if (try_wait_for_completion(&ctx->restart)) {
1586
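	/*
	 * The backlogged request has already completed, so we do not
	 * need to block; just continue processing.
	 */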
1587
1588
1589
1590 } else {
1591
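	/*
	 * We cannot block here in interrupt context.  Bail out and let
	 * the remaining sectors be processed later from a workqueue
	 * (the caller handles BLK_STS_DEV_RESOURCE).
	 */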
1592
1593
1594
1595 ctx->r.req = NULL;
1596 ctx->cc_sector += sector_step;
1597 tag_offset++;
1598 return BLK_STS_DEV_RESOURCE;
1599 }
1600 } else {
1601 wait_for_completion(&ctx->restart);
1602 }
1603 reinit_completion(&ctx->restart);
1604 fallthrough;
1605
1606
1607
1608
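	/*
	 * The request is queued and processed asynchronously;
	 * kcryptd_async_done() will be called when it finishes.
	 */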
1609 case -EINPROGRESS:
1610 ctx->r.req = NULL;
1611 ctx->cc_sector += sector_step;
1612 tag_offset++;
1613 continue;
1614
1615
1616
1617 case 0:
1618 atomic_dec(&ctx->cc_pending);
1619 ctx->cc_sector += sector_step;
1620 tag_offset++;
1621 if (!atomic)
1622 cond_resched();
1623 continue;
1624
1625
1626
1627 case -EBADMSG:
1628 atomic_dec(&ctx->cc_pending);
1629 return BLK_STS_PROTECTION;
1630
1631
1632
1633 default:
1634 atomic_dec(&ctx->cc_pending);
1635 return BLK_STS_IOERR;
1636 }
1637 }
1638
1639 return 0;
1640 }
1641
1642 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1643
/*
 * Allocate a clone bio with enough pages to hold @size bytes of encrypted
 * data.
 *
 * Pages are taken from the page mempool, first with GFP_NOWAIT.  If that
 * fails, the partially built bio is freed and the allocation is retried with
 * __GFP_DIRECT_RECLAIM while holding bio_alloc_lock, so only one bio at a
 * time may block on the mempool reserve.  This guarantees forward progress
 * without risking a mempool deadlock.
 */
1661 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1662 {
1663 struct crypt_config *cc = io->cc;
1664 struct bio *clone;
1665 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1666 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1667 unsigned i, len, remaining_size;
1668 struct page *page;
1669
1670 retry:
1671 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1672 mutex_lock(&cc->bio_alloc_lock);
1673
1674 clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
1675 GFP_NOIO, &cc->bs);
1676 clone->bi_private = io;
1677 clone->bi_end_io = crypt_endio;
1678
1679 remaining_size = size;
1680
1681 for (i = 0; i < nr_iovecs; i++) {
1682 page = mempool_alloc(&cc->page_pool, gfp_mask);
1683 if (!page) {
1684 crypt_free_buffer_pages(cc, clone);
1685 bio_put(clone);
1686 gfp_mask |= __GFP_DIRECT_RECLAIM;
1687 goto retry;
1688 }
1689
1690 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1691
1692 bio_add_page(clone, page, len, 0);
1693
1694 remaining_size -= len;
1695 }
1696
1697
1698 if (dm_crypt_integrity_io_alloc(io, clone)) {
1699 crypt_free_buffer_pages(cc, clone);
1700 bio_put(clone);
1701 clone = NULL;
1702 }
1703
1704 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1705 mutex_unlock(&cc->bio_alloc_lock);
1706
1707 return clone;
1708 }
1709
1710 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1711 {
1712 struct bio_vec *bv;
1713 struct bvec_iter_all iter_all;
1714
1715 bio_for_each_segment_all(bv, clone, iter_all) {
1716 BUG_ON(!bv->bv_page);
1717 mempool_free(bv->bv_page, &cc->page_pool);
1718 }
1719 }
1720
1721 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1722 struct bio *bio, sector_t sector)
1723 {
1724 io->cc = cc;
1725 io->base_bio = bio;
1726 io->sector = sector;
1727 io->error = 0;
1728 io->ctx.r.req = NULL;
1729 io->integrity_metadata = NULL;
1730 io->integrity_metadata_from_pool = false;
1731 atomic_set(&io->io_pending, 0);
1732 }
1733
1734 static void crypt_inc_pending(struct dm_crypt_io *io)
1735 {
1736 atomic_inc(&io->io_pending);
1737 }
1738
1739 static void kcryptd_io_bio_endio(struct work_struct *work)
1740 {
1741 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1742 bio_endio(io->base_bio);
1743 }
1744
1745
1746
1747
1748
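/*
 * One of the bios associated with this io finished.  Drop the reference and,
 * once the last reference is gone, release the resources and end the base bio.
 */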
1749 static void crypt_dec_pending(struct dm_crypt_io *io)
1750 {
1751 struct crypt_config *cc = io->cc;
1752 struct bio *base_bio = io->base_bio;
1753 blk_status_t error = io->error;
1754
1755 if (!atomic_dec_and_test(&io->io_pending))
1756 return;
1757
1758 if (io->ctx.r.req)
1759 crypt_free_req(cc, io->ctx.r.req, base_bio);
1760
1761 if (unlikely(io->integrity_metadata_from_pool))
1762 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1763 else
1764 kfree(io->integrity_metadata);
1765
1766 base_bio->bi_status = error;
1767
1768
1769
1770
1771
1772
1773
1774
1775
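	/*
	 * If this code is reached from the tasklet that is still running,
	 * calling bio_endio() directly could free the dm_crypt_io (and the
	 * embedded tasklet) underneath the tasklet code.  tasklet_trylock()
	 * only succeeds when the tasklet is not running, so ending the bio
	 * inline is safe in that case; otherwise defer it to the workqueue.
	 */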
1776 if (tasklet_trylock(&io->tasklet)) {
1777 tasklet_unlock(&io->tasklet);
1778 bio_endio(base_bio);
1779 return;
1780 }
1781
1782 INIT_WORK(&io->work, kcryptd_io_bio_endio);
1783 queue_work(cc->io_queue, &io->work);
1784 }
1785
/*
 * kcryptd/kcryptd_io:
 *
 * Encryption and decryption never happen in the bio completion (interrupt)
 * context: crypt_endio() only frees write buffers or, for reads, hands the
 * bio over to the kcryptd workqueue, which performs the actual crypto work
 * and then resubmits or completes the base bio.
 */
1803 static void crypt_endio(struct bio *clone)
1804 {
1805 struct dm_crypt_io *io = clone->bi_private;
1806 struct crypt_config *cc = io->cc;
1807 unsigned rw = bio_data_dir(clone);
1808 blk_status_t error;
1809
1810
1811
1812
1813 if (rw == WRITE)
1814 crypt_free_buffer_pages(cc, clone);
1815
1816 error = clone->bi_status;
1817 bio_put(clone);
1818
1819 if (rw == READ && !error) {
1820 kcryptd_queue_crypt(io);
1821 return;
1822 }
1823
1824 if (unlikely(error))
1825 io->error = error;
1826
1827 crypt_dec_pending(io);
1828 }
1829
1830 #define CRYPT_MAP_READ_GFP GFP_NOWAIT
1831
1832 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1833 {
1834 struct crypt_config *cc = io->cc;
1835 struct bio *clone;
1836
1837
1838
1839
1840
1841
1842
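	/*
	 * Thanks to immutable biovecs the block layer will not modify the
	 * bvec array of the clone, so the original data can still be reached
	 * for in-place decryption once the read completes; a plain
	 * bio_alloc_clone() is therefore sufficient.
	 */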
1843 clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
1844 if (!clone)
1845 return 1;
1846 clone->bi_private = io;
1847 clone->bi_end_io = crypt_endio;
1848
1849 crypt_inc_pending(io);
1850
1851 clone->bi_iter.bi_sector = cc->start + io->sector;
1852
1853 if (dm_crypt_integrity_io_alloc(io, clone)) {
1854 crypt_dec_pending(io);
1855 bio_put(clone);
1856 return 1;
1857 }
1858
1859 dm_submit_bio_remap(io->base_bio, clone);
1860 return 0;
1861 }
1862
1863 static void kcryptd_io_read_work(struct work_struct *work)
1864 {
1865 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1866
1867 crypt_inc_pending(io);
1868 if (kcryptd_io_read(io, GFP_NOIO))
1869 io->error = BLK_STS_RESOURCE;
1870 crypt_dec_pending(io);
1871 }
1872
1873 static void kcryptd_queue_read(struct dm_crypt_io *io)
1874 {
1875 struct crypt_config *cc = io->cc;
1876
1877 INIT_WORK(&io->work, kcryptd_io_read_work);
1878 queue_work(cc->io_queue, &io->work);
1879 }
1880
1881 static void kcryptd_io_write(struct dm_crypt_io *io)
1882 {
1883 struct bio *clone = io->ctx.bio_out;
1884
1885 dm_submit_bio_remap(io->base_bio, clone);
1886 }
1887
1888 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1889
1890 static int dmcrypt_write(void *data)
1891 {
1892 struct crypt_config *cc = data;
1893 struct dm_crypt_io *io;
1894
1895 while (1) {
1896 struct rb_root write_tree;
1897 struct blk_plug plug;
1898
1899 spin_lock_irq(&cc->write_thread_lock);
1900 continue_locked:
1901
1902 if (!RB_EMPTY_ROOT(&cc->write_tree))
1903 goto pop_from_list;
1904
1905 set_current_state(TASK_INTERRUPTIBLE);
1906
1907 spin_unlock_irq(&cc->write_thread_lock);
1908
1909 if (unlikely(kthread_should_stop())) {
1910 set_current_state(TASK_RUNNING);
1911 break;
1912 }
1913
1914 schedule();
1915
1916 set_current_state(TASK_RUNNING);
1917 spin_lock_irq(&cc->write_thread_lock);
1918 goto continue_locked;
1919
1920 pop_from_list:
1921 write_tree = cc->write_tree;
1922 cc->write_tree = RB_ROOT;
1923 spin_unlock_irq(&cc->write_thread_lock);
1924
1925 BUG_ON(rb_parent(write_tree.rb_node));
1926
1927
1928
1929
1930
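	/*
	 * The rb-tree keeps the pending writes sorted by sector, so
	 * submitting in tree order under a plug gives good locality and
	 * lets the block layer merge adjacent bios.
	 */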
1931 blk_start_plug(&plug);
1932 do {
1933 io = crypt_io_from_node(rb_first(&write_tree));
1934 rb_erase(&io->rb_node, &write_tree);
1935 kcryptd_io_write(io);
1936 } while (!RB_EMPTY_ROOT(&write_tree));
1937 blk_finish_plug(&plug);
1938 }
1939 return 0;
1940 }
1941
1942 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1943 {
1944 struct bio *clone = io->ctx.bio_out;
1945 struct crypt_config *cc = io->cc;
1946 unsigned long flags;
1947 sector_t sector;
1948 struct rb_node **rbp, *parent;
1949
1950 if (unlikely(io->error)) {
1951 crypt_free_buffer_pages(cc, clone);
1952 bio_put(clone);
1953 crypt_dec_pending(io);
1954 return;
1955 }
1956
1957
1958 BUG_ON(io->ctx.iter_out.bi_size);
1959
1960 clone->bi_iter.bi_sector = cc->start + io->sector;
1961
1962 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1963 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1964 dm_submit_bio_remap(io->base_bio, clone);
1965 return;
1966 }
1967
1968 spin_lock_irqsave(&cc->write_thread_lock, flags);
1969 if (RB_EMPTY_ROOT(&cc->write_tree))
1970 wake_up_process(cc->write_thread);
1971 rbp = &cc->write_tree.rb_node;
1972 parent = NULL;
1973 sector = io->sector;
1974 while (*rbp) {
1975 parent = *rbp;
1976 if (sector < crypt_io_from_node(parent)->sector)
1977 rbp = &(*rbp)->rb_left;
1978 else
1979 rbp = &(*rbp)->rb_right;
1980 }
1981 rb_link_node(&io->rb_node, parent, rbp);
1982 rb_insert_color(&io->rb_node, &cc->write_tree);
1983 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1984 }
1985
1986 static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1987 struct convert_context *ctx)
1988
1989 {
1990 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1991 return false;
1992
1993
1994
1995
1996
1997
1998 switch (bio_op(ctx->bio_in)) {
1999 case REQ_OP_WRITE:
2000 case REQ_OP_WRITE_ZEROES:
2001 return true;
2002 default:
2003 return false;
2004 }
2005 }
2006
2007 static void kcryptd_crypt_write_continue(struct work_struct *work)
2008 {
2009 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2010 struct crypt_config *cc = io->cc;
2011 struct convert_context *ctx = &io->ctx;
2012 int crypt_finished;
2013 sector_t sector = io->sector;
2014 blk_status_t r;
2015
2016 wait_for_completion(&ctx->restart);
2017 reinit_completion(&ctx->restart);
2018
2019 r = crypt_convert(cc, &io->ctx, true, false);
2020 if (r)
2021 io->error = r;
2022 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2023 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2024
2025 wait_for_completion(&ctx->restart);
2026 crypt_finished = 1;
2027 }
2028
2029
2030 if (crypt_finished) {
2031 kcryptd_crypt_write_io_submit(io, 0);
2032 io->sector = sector;
2033 }
2034
2035 crypt_dec_pending(io);
2036 }
2037
2038 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
2039 {
2040 struct crypt_config *cc = io->cc;
2041 struct convert_context *ctx = &io->ctx;
2042 struct bio *clone;
2043 int crypt_finished;
2044 sector_t sector = io->sector;
2045 blk_status_t r;
2046
2047
2048
2049
2050 crypt_inc_pending(io);
2051 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2052
2053 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
2054 if (unlikely(!clone)) {
2055 io->error = BLK_STS_IOERR;
2056 goto dec;
2057 }
2058
2059 io->ctx.bio_out = clone;
2060 io->ctx.iter_out = clone->bi_iter;
2061
2062 sector += bio_sectors(clone);
2063
2064 crypt_inc_pending(io);
2065 r = crypt_convert(cc, ctx,
2066 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2067
2068
2069
2070
2071
2072 if (r == BLK_STS_DEV_RESOURCE) {
2073 INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2074 queue_work(cc->crypt_queue, &io->work);
2075 return;
2076 }
2077 if (r)
2078 io->error = r;
2079 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2080 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2081
2082 wait_for_completion(&ctx->restart);
2083 crypt_finished = 1;
2084 }
2085
2086
2087 if (crypt_finished) {
2088 kcryptd_crypt_write_io_submit(io, 0);
2089 io->sector = sector;
2090 }
2091
2092 dec:
2093 crypt_dec_pending(io);
2094 }
2095
2096 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
2097 {
2098 crypt_dec_pending(io);
2099 }
2100
2101 static void kcryptd_crypt_read_continue(struct work_struct *work)
2102 {
2103 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2104 struct crypt_config *cc = io->cc;
2105 blk_status_t r;
2106
2107 wait_for_completion(&io->ctx.restart);
2108 reinit_completion(&io->ctx.restart);
2109
2110 r = crypt_convert(cc, &io->ctx, true, false);
2111 if (r)
2112 io->error = r;
2113
2114 if (atomic_dec_and_test(&io->ctx.cc_pending))
2115 kcryptd_crypt_read_done(io);
2116
2117 crypt_dec_pending(io);
2118 }
2119
2120 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
2121 {
2122 struct crypt_config *cc = io->cc;
2123 blk_status_t r;
2124
2125 crypt_inc_pending(io);
2126
2127 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2128 io->sector);
2129
2130 r = crypt_convert(cc, &io->ctx,
2131 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2132
2133
2134
2135
2136 if (r == BLK_STS_DEV_RESOURCE) {
2137 INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2138 queue_work(cc->crypt_queue, &io->work);
2139 return;
2140 }
2141 if (r)
2142 io->error = r;
2143
2144 if (atomic_dec_and_test(&io->ctx.cc_pending))
2145 kcryptd_crypt_read_done(io);
2146
2147 crypt_dec_pending(io);
2148 }
2149
2150 static void kcryptd_async_done(struct crypto_async_request *async_req,
2151 int error)
2152 {
2153 struct dm_crypt_request *dmreq = async_req->data;
2154 struct convert_context *ctx = dmreq->ctx;
2155 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2156 struct crypt_config *cc = io->cc;
2157
2158
2159
2160
2161
2162
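	/*
	 * -EINPROGRESS here means the driver has moved a previously
	 * backlogged request into its queue; wake up crypt_convert(), which
	 * is waiting on ctx->restart, and keep waiting for the real
	 * completion callback.
	 */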
2163 if (error == -EINPROGRESS) {
2164 complete(&ctx->restart);
2165 return;
2166 }
2167
2168 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2169 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2170
2171 if (error == -EBADMSG) {
2172 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2173
2174 DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
2175 ctx->bio_in->bi_bdev, s);
2176 dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
2177 ctx->bio_in, s, 0);
2178 io->error = BLK_STS_PROTECTION;
2179 } else if (error < 0)
2180 io->error = BLK_STS_IOERR;
2181
2182 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2183
2184 if (!atomic_dec_and_test(&ctx->cc_pending))
2185 return;
2186
2187
2188
2189
2190
2191 if (bio_data_dir(io->base_bio) == READ) {
2192 kcryptd_crypt_read_done(io);
2193 return;
2194 }
2195
2196 if (kcryptd_crypt_write_inline(cc, ctx)) {
2197 complete(&ctx->restart);
2198 return;
2199 }
2200
2201 kcryptd_crypt_write_io_submit(io, 1);
2202 }
2203
2204 static void kcryptd_crypt(struct work_struct *work)
2205 {
2206 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2207
2208 if (bio_data_dir(io->base_bio) == READ)
2209 kcryptd_crypt_read_convert(io);
2210 else
2211 kcryptd_crypt_write_convert(io);
2212 }
2213
2214 static void kcryptd_crypt_tasklet(unsigned long work)
2215 {
2216 kcryptd_crypt((struct work_struct *)work);
2217 }
2218
2219 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2220 {
2221 struct crypt_config *cc = io->cc;
2222
2223 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2224 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2225
2226
2227
2228
2229
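	/*
	 * With no_read_workqueue/no_write_workqueue the crypto work is done
	 * inline in the current context.  In hard IRQ context, or with
	 * interrupts disabled, the synchronous crypto API cannot be used
	 * directly, so bounce the work to a tasklet instead.
	 */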
2230 if (in_hardirq() || irqs_disabled()) {
2231 tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2232 tasklet_schedule(&io->tasklet);
2233 return;
2234 }
2235
2236 kcryptd_crypt(&io->work);
2237 return;
2238 }
2239
2240 INIT_WORK(&io->work, kcryptd_crypt);
2241 queue_work(cc->crypt_queue, &io->work);
2242 }
2243
2244 static void crypt_free_tfms_aead(struct crypt_config *cc)
2245 {
2246 if (!cc->cipher_tfm.tfms_aead)
2247 return;
2248
2249 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2250 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2251 cc->cipher_tfm.tfms_aead[0] = NULL;
2252 }
2253
2254 kfree(cc->cipher_tfm.tfms_aead);
2255 cc->cipher_tfm.tfms_aead = NULL;
2256 }
2257
2258 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2259 {
2260 unsigned i;
2261
2262 if (!cc->cipher_tfm.tfms)
2263 return;
2264
2265 for (i = 0; i < cc->tfms_count; i++)
2266 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2267 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2268 cc->cipher_tfm.tfms[i] = NULL;
2269 }
2270
2271 kfree(cc->cipher_tfm.tfms);
2272 cc->cipher_tfm.tfms = NULL;
2273 }
2274
2275 static void crypt_free_tfms(struct crypt_config *cc)
2276 {
2277 if (crypt_integrity_aead(cc))
2278 crypt_free_tfms_aead(cc);
2279 else
2280 crypt_free_tfms_skcipher(cc);
2281 }
2282
2283 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2284 {
2285 unsigned i;
2286 int err;
2287
2288 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2289 sizeof(struct crypto_skcipher *),
2290 GFP_KERNEL);
2291 if (!cc->cipher_tfm.tfms)
2292 return -ENOMEM;
2293
2294 for (i = 0; i < cc->tfms_count; i++) {
2295 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2296 CRYPTO_ALG_ALLOCATES_MEMORY);
2297 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2298 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2299 crypt_free_tfms(cc);
2300 return err;
2301 }
2302 }
2303
/*
 * dm-crypt performance can vary greatly depending on which crypto
 * algorithm implementation is used.  Help people debug performance
 * problems by logging the ->cra_driver_name.
 */
2309 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2310 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2311 return 0;
2312 }
2313
2314 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2315 {
2316 int err;
2317
2318 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2319 if (!cc->cipher_tfm.tfms)
2320 return -ENOMEM;
2321
2322 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2323 CRYPTO_ALG_ALLOCATES_MEMORY);
2324 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2325 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2326 crypt_free_tfms(cc);
2327 return err;
2328 }
2329
2330 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2331 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2332 return 0;
2333 }
2334
2335 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2336 {
2337 if (crypt_integrity_aead(cc))
2338 return crypt_alloc_tfms_aead(cc, ciphermode);
2339 else
2340 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2341 }
2342
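/*
 * When multiple tfms are used (keycount > 1), the supplied key is split
 * evenly between them; key_extra_size bytes at the end are reserved for
 * IV material (e.g. lmk/tcw/elephant) and are not part of any subkey.
 */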
2343 static unsigned crypt_subkey_size(struct crypt_config *cc)
2344 {
2345 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2346 }
2347
2348 static unsigned crypt_authenckey_size(struct crypt_config *cc)
2349 {
2350 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2351 }
2352
/*
 * If the cipher requires an authentication key (HMAC), the key material
 * is laid out in the format expected by the authenc() template:
 *   | rta length | rta type | enckey length | authentication key | enc key |
 */
2358 static void crypt_copy_authenckey(char *p, const void *key,
2359 unsigned enckeylen, unsigned authkeylen)
2360 {
2361 struct crypto_authenc_key_param *param;
2362 struct rtattr *rta;
2363
2364 rta = (struct rtattr *)p;
2365 param = RTA_DATA(rta);
2366 param->enckeylen = cpu_to_be32(enckeylen);
2367 rta->rta_len = RTA_LENGTH(sizeof(*param));
2368 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2369 p += RTA_SPACE(sizeof(*param));
2370 memcpy(p, key + enckeylen, authkeylen);
2371 p += authkeylen;
2372 memcpy(p, key, enckeylen);
2373 }
2374
2375 static int crypt_setkey(struct crypt_config *cc)
2376 {
2377 unsigned subkey_size;
2378 int err = 0, i, r;
2379
/* Ignore extra keys (which are used for IV etc) */
2381 subkey_size = crypt_subkey_size(cc);
2382
2383 if (crypt_integrity_hmac(cc)) {
2384 if (subkey_size < cc->key_mac_size)
2385 return -EINVAL;
2386
2387 crypt_copy_authenckey(cc->authenc_key, cc->key,
2388 subkey_size - cc->key_mac_size,
2389 cc->key_mac_size);
2390 }
2391
2392 for (i = 0; i < cc->tfms_count; i++) {
2393 if (crypt_integrity_hmac(cc))
2394 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2395 cc->authenc_key, crypt_authenckey_size(cc));
2396 else if (crypt_integrity_aead(cc))
2397 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2398 cc->key + (i * subkey_size),
2399 subkey_size);
2400 else
2401 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2402 cc->key + (i * subkey_size),
2403 subkey_size);
2404 if (r)
2405 err = r;
2406 }
2407
2408 if (crypt_integrity_hmac(cc))
2409 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2410
2411 return err;
2412 }
2413
2414 #ifdef CONFIG_KEYS
2415
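/*
 * Keyring support: instead of hex key material the table line may carry
 * ":<key_size>:<key_type>:<key_description>".  The set_key_* helpers copy
 * the payload of the corresponding key type (logon/user, encrypted,
 * trusted) into cc->key.
 */
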
2416 static bool contains_whitespace(const char *str)
2417 {
2418 while (*str)
2419 if (isspace(*str++))
2420 return true;
2421 return false;
2422 }
2423
2424 static int set_key_user(struct crypt_config *cc, struct key *key)
2425 {
2426 const struct user_key_payload *ukp;
2427
2428 ukp = user_key_payload_locked(key);
2429 if (!ukp)
2430 return -EKEYREVOKED;
2431
2432 if (cc->key_size != ukp->datalen)
2433 return -EINVAL;
2434
2435 memcpy(cc->key, ukp->data, cc->key_size);
2436
2437 return 0;
2438 }
2439
2440 static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2441 {
2442 const struct encrypted_key_payload *ekp;
2443
2444 ekp = key->payload.data[0];
2445 if (!ekp)
2446 return -EKEYREVOKED;
2447
2448 if (cc->key_size != ekp->decrypted_datalen)
2449 return -EINVAL;
2450
2451 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2452
2453 return 0;
2454 }
2455
2456 static int set_key_trusted(struct crypt_config *cc, struct key *key)
2457 {
2458 const struct trusted_key_payload *tkp;
2459
2460 tkp = key->payload.data[0];
2461 if (!tkp)
2462 return -EKEYREVOKED;
2463
2464 if (cc->key_size != tkp->key_len)
2465 return -EINVAL;
2466
2467 memcpy(cc->key, tkp->key, cc->key_size);
2468
2469 return 0;
2470 }
2471
2472 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2473 {
2474 char *new_key_string, *key_desc;
2475 int ret;
2476 struct key_type *type;
2477 struct key *key;
2478 int (*set_key)(struct crypt_config *cc, struct key *key);
2479
/*
 * Reject key_string with whitespace. dm core currently lacks code for
 * proper whitespace escaping in arguments on the DM_TABLE_STATUS path.
 */
2484 if (contains_whitespace(key_string)) {
2485 DMERR("whitespace chars not allowed in key string");
2486 return -EINVAL;
2487 }
2488
/* look for the next ':' separating key_type from key_description */
2490 key_desc = strpbrk(key_string, ":");
2491 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2492 return -EINVAL;
2493
2494 if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2495 type = &key_type_logon;
2496 set_key = set_key_user;
2497 } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2498 type = &key_type_user;
2499 set_key = set_key_user;
2500 } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
2501 !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2502 type = &key_type_encrypted;
2503 set_key = set_key_encrypted;
2504 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
2505 !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
2506 type = &key_type_trusted;
2507 set_key = set_key_trusted;
2508 } else {
2509 return -EINVAL;
2510 }
2511
2512 new_key_string = kstrdup(key_string, GFP_KERNEL);
2513 if (!new_key_string)
2514 return -ENOMEM;
2515
2516 key = request_key(type, key_desc + 1, NULL);
2517 if (IS_ERR(key)) {
2518 kfree_sensitive(new_key_string);
2519 return PTR_ERR(key);
2520 }
2521
2522 down_read(&key->sem);
2523
2524 ret = set_key(cc, key);
2525 if (ret < 0) {
2526 up_read(&key->sem);
2527 key_put(key);
2528 kfree_sensitive(new_key_string);
2529 return ret;
2530 }
2531
2532 up_read(&key->sem);
2533 key_put(key);
2534
/* clear the flag since following operations may invalidate previously valid key */
2536 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2537
2538 ret = crypt_setkey(cc);
2539
2540 if (!ret) {
2541 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2542 kfree_sensitive(cc->key_string);
2543 cc->key_string = new_key_string;
2544 } else
2545 kfree_sensitive(new_key_string);
2546
2547 return ret;
2548 }
2549
2550 static int get_key_size(char **key_string)
2551 {
2552 char *colon, dummy;
2553 int ret;
2554
2555 if (*key_string[0] != ':')
2556 return strlen(*key_string) >> 1;
2557
/* look for the next ':' in the key string */
2559 colon = strpbrk(*key_string + 1, ":");
2560 if (!colon)
2561 return -EINVAL;
2562
2563 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2564 return -EINVAL;
2565
2566 *key_string = colon;
2567
/* remaining key string should be :<logon|user|encrypted|trusted>:<key_desc> */
2569
2570 return ret;
2571 }
2572
2573 #else
2574
2575 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2576 {
2577 return -EINVAL;
2578 }
2579
2580 static int get_key_size(char **key_string)
2581 {
2582 return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
2583 }
2584
2585 #endif
2586
2587 static int crypt_set_key(struct crypt_config *cc, char *key)
2588 {
2589 int r = -EINVAL;
2590 int key_string_len = strlen(key);
2591
/* Hyphen (which gives a key_size of zero) means there is no key. */
2593 if (!cc->key_size && strcmp(key, "-"))
2594 goto out;
2595
/* ':' means the key is in the kernel keyring, short-circuit normal key processing */
2597 if (key[0] == ':') {
2598 r = crypt_set_keyring_key(cc, key + 1);
2599 goto out;
2600 }
2601
/* clear the flag since following operations may invalidate previously valid key */
2603 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2604
/* wipe references to any kernel keyring key */
2606 kfree_sensitive(cc->key_string);
2607 cc->key_string = NULL;
2608
/* Decode the key from its hex representation. */
2610 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2611 goto out;
2612
2613 r = crypt_setkey(cc);
2614 if (!r)
2615 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2616
2617 out:
/* The hex key string is not needed after this point, wipe it. */
2619 memset(key, '0', key_string_len);
2620
2621 return r;
2622 }
2623
2624 static int crypt_wipe_key(struct crypt_config *cc)
2625 {
2626 int r;
2627
2628 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2629 get_random_bytes(&cc->key, cc->key_size);
2630
/* Wipe the IV generator's private keys, if any. */
2632 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2633 r = cc->iv_gen_ops->wipe(cc);
2634 if (r)
2635 return r;
2636 }
2637
2638 kfree_sensitive(cc->key_string);
2639 cc->key_string = NULL;
2640 r = crypt_setkey(cc);
2641 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2642
2643 return r;
2644 }
2645
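/*
 * Memory budgeting for the per-device page pools: every dm-crypt client
 * gets an equal share of DM_CRYPT_MEMORY_PERCENT of low memory, but never
 * less than DM_CRYPT_MIN_PAGES_PER_CLIENT pages.
 */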
2646 static void crypt_calculate_pages_per_client(void)
2647 {
2648 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2649
2650 if (!dm_crypt_clients_n)
2651 return;
2652
2653 pages /= dm_crypt_clients_n;
2654 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2655 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2656 dm_crypt_pages_per_client = pages;
2657 }
2658
2659 static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2660 {
2661 struct crypt_config *cc = pool_data;
2662 struct page *page;
2663
/*
 * Note: percpu_counter_read_positive() may over- (and under-) estimate
 * the current usage by at most (batch - 1) * num_online_cpus() pages,
 * but it avoids the potential spinlock contention of an exact result.
 */
2669 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2670 likely(gfp_mask & __GFP_NORETRY))
2671 return NULL;
2672
2673 page = alloc_page(gfp_mask);
2674 if (likely(page != NULL))
2675 percpu_counter_add(&cc->n_allocated_pages, 1);
2676
2677 return page;
2678 }
2679
2680 static void crypt_page_free(void *page, void *pool_data)
2681 {
2682 struct crypt_config *cc = pool_data;
2683
2684 __free_page(page);
2685 percpu_counter_sub(&cc->n_allocated_pages, 1);
2686 }
2687
2688 static void crypt_dtr(struct dm_target *ti)
2689 {
2690 struct crypt_config *cc = ti->private;
2691
2692 ti->private = NULL;
2693
2694 if (!cc)
2695 return;
2696
2697 if (cc->write_thread)
2698 kthread_stop(cc->write_thread);
2699
2700 if (cc->io_queue)
2701 destroy_workqueue(cc->io_queue);
2702 if (cc->crypt_queue)
2703 destroy_workqueue(cc->crypt_queue);
2704
2705 crypt_free_tfms(cc);
2706
2707 bioset_exit(&cc->bs);
2708
2709 mempool_exit(&cc->page_pool);
2710 mempool_exit(&cc->req_pool);
2711 mempool_exit(&cc->tag_pool);
2712
2713 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2714 percpu_counter_destroy(&cc->n_allocated_pages);
2715
2716 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2717 cc->iv_gen_ops->dtr(cc);
2718
2719 if (cc->dev)
2720 dm_put_device(ti, cc->dev);
2721
2722 kfree_sensitive(cc->cipher_string);
2723 kfree_sensitive(cc->key_string);
2724 kfree_sensitive(cc->cipher_auth);
2725 kfree_sensitive(cc->authenc_key);
2726
2727 mutex_destroy(&cc->bio_alloc_lock);
2728
/* Must zero the key material before freeing. */
2730 kfree_sensitive(cc);
2731
2732 spin_lock(&dm_crypt_clients_lock);
2733 WARN_ON(!dm_crypt_clients_n);
2734 dm_crypt_clients_n--;
2735 crypt_calculate_pages_per_client();
2736 spin_unlock(&dm_crypt_clients_lock);
2737
2738 dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
2739 }
2740
2741 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2742 {
2743 struct crypt_config *cc = ti->private;
2744
2745 if (crypt_integrity_aead(cc))
2746 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2747 else
2748 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2749
2750 if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
2752 cc->iv_size = max(cc->iv_size,
2753 (unsigned int)(sizeof(u64) / sizeof(u8)));
2754 else if (ivmode) {
2755 DMWARN("Selected cipher does not support IVs");
2756 ivmode = NULL;
2757 }
2758
/* Choose the IV generation mode. */
2760 if (ivmode == NULL)
2761 cc->iv_gen_ops = NULL;
2762 else if (strcmp(ivmode, "plain") == 0)
2763 cc->iv_gen_ops = &crypt_iv_plain_ops;
2764 else if (strcmp(ivmode, "plain64") == 0)
2765 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2766 else if (strcmp(ivmode, "plain64be") == 0)
2767 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2768 else if (strcmp(ivmode, "essiv") == 0)
2769 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2770 else if (strcmp(ivmode, "benbi") == 0)
2771 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2772 else if (strcmp(ivmode, "null") == 0)
2773 cc->iv_gen_ops = &crypt_iv_null_ops;
2774 else if (strcmp(ivmode, "eboiv") == 0)
2775 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2776 else if (strcmp(ivmode, "elephant") == 0) {
2777 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2778 cc->key_parts = 2;
2779 cc->key_extra_size = cc->key_size / 2;
2780 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2781 return -EINVAL;
2782 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2783 } else if (strcmp(ivmode, "lmk") == 0) {
2784 cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
 * Versions 2 and 3 of lmk are recognised according to the length of
 * the provided multi-key string.
 * If present (version 3), the last key is used as the IV seed.
 * All keys (including the IV seed) are always the same size.
 */
2791 if (cc->key_size % cc->key_parts) {
2792 cc->key_parts++;
2793 cc->key_extra_size = cc->key_size / cc->key_parts;
2794 }
2795 } else if (strcmp(ivmode, "tcw") == 0) {
2796 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2797 cc->key_parts += 2;
2798 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2799 } else if (strcmp(ivmode, "random") == 0) {
2800 cc->iv_gen_ops = &crypt_iv_random_ops;
/* Need storage space in the integrity fields. */
2802 cc->integrity_iv_size = cc->iv_size;
2803 } else {
2804 ti->error = "Invalid IV mode";
2805 return -EINVAL;
2806 }
2807
2808 return 0;
2809 }
2810
/*
 * Workaround to parse the HMAC algorithm from an AEAD crypto API spec.
 * The HMAC is needed to calculate the tag size (HMAC digest size).
 * Ideally this would be done through dedicated crypto API calls.
 */
2816 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2817 {
2818 char *start, *end, *mac_alg = NULL;
2819 struct crypto_ahash *mac;
2820
2821 if (!strstarts(cipher_api, "authenc("))
2822 return 0;
2823
2824 start = strchr(cipher_api, '(');
2825 end = strchr(cipher_api, ',');
2826 if (!start || !end || ++start > end)
2827 return -EINVAL;
2828
2829 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2830 if (!mac_alg)
2831 return -ENOMEM;
2832 strncpy(mac_alg, start, end - start);
2833
2834 mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
2835 kfree(mac_alg);
2836
2837 if (IS_ERR(mac))
2838 return PTR_ERR(mac);
2839
2840 cc->key_mac_size = crypto_ahash_digestsize(mac);
2841 crypto_free_ahash(mac);
2842
2843 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2844 if (!cc->authenc_key)
2845 return -ENOMEM;
2846
2847 return 0;
2848 }
2849
2850 static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2851 char **ivmode, char **ivopts)
2852 {
2853 struct crypt_config *cc = ti->private;
2854 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2855 int ret = -EINVAL;
2856
2857 cc->tfms_count = 1;
2858
/*
 * New (capi: prefixed) cipher format:
 *   capi:cipher_api_spec-iv:ivopts
 */
2863 tmp = &cipher_in[strlen("capi:")];
2864
/* Separate IV options if present; a hash name may contain another '-'. */
2866 *ivopts = strrchr(tmp, ':');
2867 if (*ivopts) {
2868 **ivopts = '\0';
2869 (*ivopts)++;
2870 }
2871
2872 *ivmode = strrchr(tmp, '-');
2873 if (*ivmode) {
2874 **ivmode = '\0';
2875 (*ivmode)++;
2876 }
/* The rest is the crypto API spec. */
2878 cipher_api = tmp;
2879
/* For AEAD mode, extract the HMAC so the authentication key size is known. */
2881 if (crypt_integrity_aead(cc)) {
2882 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2883 if (ret < 0) {
2884 ti->error = "Invalid AEAD cipher spec";
2885 return -ENOMEM;
2886 }
2887 }
2888
2889 if (*ivmode && !strcmp(*ivmode, "lmk"))
2890 cc->tfms_count = 64;
2891
2892 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2893 if (!*ivopts) {
2894 ti->error = "Digest algorithm missing for ESSIV mode";
2895 return -EINVAL;
2896 }
2897 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2898 cipher_api, *ivopts);
2899 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2900 ti->error = "Cannot allocate cipher string";
2901 return -ENOMEM;
2902 }
2903 cipher_api = buf;
2904 }
2905
2906 cc->key_parts = cc->tfms_count;
2907
/* Allocate the cipher transform(s). */
2909 ret = crypt_alloc_tfms(cc, cipher_api);
2910 if (ret < 0) {
2911 ti->error = "Error allocating crypto tfm";
2912 return ret;
2913 }
2914
2915 if (crypt_integrity_aead(cc))
2916 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2917 else
2918 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2919
2920 return 0;
2921 }
2922
2923 static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2924 char **ivmode, char **ivopts)
2925 {
2926 struct crypt_config *cc = ti->private;
2927 char *tmp, *cipher, *chainmode, *keycount;
2928 char *cipher_api = NULL;
2929 int ret = -EINVAL;
2930 char dummy;
2931
2932 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2933 ti->error = "Bad cipher specification";
2934 return -EINVAL;
2935 }
2936
/*
 * Legacy dm-crypt cipher specification:
 *   cipher[:keycount]-mode-iv:ivopts
 */
2941 tmp = cipher_in;
2942 keycount = strsep(&tmp, "-");
2943 cipher = strsep(&keycount, ":");
2944
2945 if (!keycount)
2946 cc->tfms_count = 1;
2947 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2948 !is_power_of_2(cc->tfms_count)) {
2949 ti->error = "Bad cipher key count specification";
2950 return -EINVAL;
2951 }
2952 cc->key_parts = cc->tfms_count;
2953
2954 chainmode = strsep(&tmp, "-");
2955 *ivmode = strsep(&tmp, ":");
2956 *ivopts = tmp;
2957
/*
 * For compatibility with the original dm-crypt mapping format, if
 * only the cipher name is supplied, use cbc-plain.
 */
2962 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2963 chainmode = "cbc";
2964 *ivmode = "plain";
2965 }
2966
2967 if (strcmp(chainmode, "ecb") && !*ivmode) {
2968 ti->error = "IV mechanism required";
2969 return -EINVAL;
2970 }
2971
2972 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2973 if (!cipher_api)
2974 goto bad_mem;
2975
2976 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2977 if (!*ivopts) {
2978 ti->error = "Digest algorithm missing for ESSIV mode";
2979 kfree(cipher_api);
2980 return -EINVAL;
2981 }
2982 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2983 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2984 } else {
2985 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2986 "%s(%s)", chainmode, cipher);
2987 }
2988 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2989 kfree(cipher_api);
2990 goto bad_mem;
2991 }
2992
/* Allocate the cipher transform(s). */
2994 ret = crypt_alloc_tfms(cc, cipher_api);
2995 if (ret < 0) {
2996 ti->error = "Error allocating crypto tfm";
2997 kfree(cipher_api);
2998 return ret;
2999 }
3000 kfree(cipher_api);
3001
3002 return 0;
3003 bad_mem:
3004 ti->error = "Cannot allocate cipher strings";
3005 return -ENOMEM;
3006 }
3007
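/*
 * Parse the cipher specification and set the key.  Both the legacy
 * "cipher[:keycount]-mode-iv:ivopts" format and the crypto API
 * "capi:..." format are accepted; the IV generator is then constructed
 * and initialised.
 */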
3008 static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
3009 {
3010 struct crypt_config *cc = ti->private;
3011 char *ivmode = NULL, *ivopts = NULL;
3012 int ret;
3013
3014 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3015 if (!cc->cipher_string) {
3016 ti->error = "Cannot allocate cipher strings";
3017 return -ENOMEM;
3018 }
3019
3020 if (strstarts(cipher_in, "capi:"))
3021 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
3022 else
3023 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
3024 if (ret)
3025 return ret;
3026
/* Initialize the IV mode. */
3028 ret = crypt_ctr_ivmode(ti, ivmode);
3029 if (ret < 0)
3030 return ret;
3031
/* Initialize and set the key. */
3033 ret = crypt_set_key(cc, key);
3034 if (ret < 0) {
3035 ti->error = "Error decoding and setting key";
3036 return ret;
3037 }
3038
/* Allocate IV generator private data. */
3040 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3041 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3042 if (ret < 0) {
3043 ti->error = "Error creating IV";
3044 return ret;
3045 }
3046 }
3047
/* Initialize the IV generator (e.g. set keys for ESSIV). */
3049 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3050 ret = cc->iv_gen_ops->init(cc);
3051 if (ret < 0) {
3052 ti->error = "Error initialising IV";
3053 return ret;
3054 }
3055 }
3056
/* wipe the kernel key payload copy */
3058 if (cc->key_string)
3059 memset(cc->key, 0, cc->key_size * sizeof(u8));
3060
3061 return ret;
3062 }
3063
3064 static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
3065 {
3066 struct crypt_config *cc = ti->private;
3067 struct dm_arg_set as;
3068 static const struct dm_arg _args[] = {
3069 {0, 8, "Invalid number of feature args"},
3070 };
3071 unsigned int opt_params, val;
3072 const char *opt_string, *sval;
3073 char dummy;
3074 int ret;
3075
/* Optional parameters */
3077 as.argc = argc;
3078 as.argv = argv;
3079
3080 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
3081 if (ret)
3082 return ret;
3083
3084 while (opt_params--) {
3085 opt_string = dm_shift_arg(&as);
3086 if (!opt_string) {
3087 ti->error = "Not enough feature arguments";
3088 return -EINVAL;
3089 }
3090
3091 if (!strcasecmp(opt_string, "allow_discards"))
3092 ti->num_discard_bios = 1;
3093
3094 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
3095 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3096
3097 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
3098 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3099 else if (!strcasecmp(opt_string, "no_read_workqueue"))
3100 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3101 else if (!strcasecmp(opt_string, "no_write_workqueue"))
3102 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3103 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
3104 if (val == 0 || val > MAX_TAG_SIZE) {
3105 ti->error = "Invalid integrity arguments";
3106 return -EINVAL;
3107 }
3108 cc->on_disk_tag_size = val;
3109 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
3110 if (!strcasecmp(sval, "aead")) {
3111 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3112 } else if (strcasecmp(sval, "none")) {
3113 ti->error = "Unknown integrity profile";
3114 return -EINVAL;
3115 }
3116
3117 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3118 if (!cc->cipher_auth)
3119 return -ENOMEM;
3120 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3121 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3122 cc->sector_size > 4096 ||
3123 (cc->sector_size & (cc->sector_size - 1))) {
3124 ti->error = "Invalid feature value for sector_size";
3125 return -EINVAL;
3126 }
3127 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3128 ti->error = "Device size is not multiple of sector_size feature";
3129 return -EINVAL;
3130 }
3131 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3132 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
3133 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3134 else {
3135 ti->error = "Invalid feature arguments";
3136 return -EINVAL;
3137 }
3138 }
3139
3140 return 0;
3141 }
3142
3143 #ifdef CONFIG_BLK_DEV_ZONED
3144 static int crypt_report_zones(struct dm_target *ti,
3145 struct dm_report_zones_args *args, unsigned int nr_zones)
3146 {
3147 struct crypt_config *cc = ti->private;
3148
3149 return dm_report_zones(cc->dev->bdev, cc->start,
3150 cc->start + dm_target_offset(ti, args->next_sector),
3151 args, nr_zones);
3152 }
3153 #else
3154 #define crypt_report_zones NULL
3155 #endif
3156
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
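/*
 * Illustrative example (device name and sector count are arbitrary): a
 * 512-bit aes-xts-plain64 mapping over the whole of /dev/sdb would use a
 * dmsetup table line like
 *   0 <dev_sectors> crypt aes-xts-plain64 <128 hex chars> 0 /dev/sdb 0
 */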
3161 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3162 {
3163 struct crypt_config *cc;
3164 const char *devname = dm_table_device_name(ti->table);
3165 int key_size;
3166 unsigned int align_mask;
3167 unsigned long long tmpll;
3168 int ret;
3169 size_t iv_size_padding, additional_req_size;
3170 char dummy;
3171
3172 if (argc < 5) {
3173 ti->error = "Not enough arguments";
3174 return -EINVAL;
3175 }
3176
3177 key_size = get_key_size(&argv[1]);
3178 if (key_size < 0) {
3179 ti->error = "Cannot parse key size";
3180 return -EINVAL;
3181 }
3182
3183 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3184 if (!cc) {
3185 ti->error = "Cannot allocate encryption context";
3186 return -ENOMEM;
3187 }
3188 cc->key_size = key_size;
3189 cc->sector_size = (1 << SECTOR_SHIFT);
3190 cc->sector_shift = 0;
3191
3192 ti->private = cc;
3193
3194 spin_lock(&dm_crypt_clients_lock);
3195 dm_crypt_clients_n++;
3196 crypt_calculate_pages_per_client();
3197 spin_unlock(&dm_crypt_clients_lock);
3198
3199 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3200 if (ret < 0)
3201 goto bad;
3202
/* Optional parameters need to be read before the cipher constructor. */
3204 if (argc > 5) {
3205 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3206 if (ret)
3207 goto bad;
3208 }
3209
3210 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3211 if (ret < 0)
3212 goto bad;
3213
3214 if (crypt_integrity_aead(cc)) {
3215 cc->dmreq_start = sizeof(struct aead_request);
3216 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3217 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3218 } else {
3219 cc->dmreq_start = sizeof(struct skcipher_request);
3220 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3221 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3222 }
3223 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3224
3225 if (align_mask < CRYPTO_MINALIGN) {
/* Allocate the padding exactly. */
3227 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3228 & align_mask;
3229 } else {
/*
 * If the cipher requires greater alignment than kmalloc alignment,
 * we don't know the exact position of the initialization vector,
 * so we must assume the worst case.
 */
3235 iv_size_padding = align_mask;
3236 }
3237
/* ...| IV + padding | original IV | original sector number | bio tag offset |... */
3239 additional_req_size = sizeof(struct dm_crypt_request) +
3240 iv_size_padding + cc->iv_size +
3241 cc->iv_size +
3242 sizeof(uint64_t) +
3243 sizeof(unsigned int);
3244
3245 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3246 if (ret) {
3247 ti->error = "Cannot allocate crypt request mempool";
3248 goto bad;
3249 }
3250
3251 cc->per_bio_data_size = ti->per_io_data_size =
3252 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3253 ARCH_KMALLOC_MINALIGN);
3254
3255 ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3256 if (ret) {
3257 ti->error = "Cannot allocate page mempool";
3258 goto bad;
3259 }
3260
3261 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3262 if (ret) {
3263 ti->error = "Cannot allocate crypt bioset";
3264 goto bad;
3265 }
3266
3267 mutex_init(&cc->bio_alloc_lock);
3268
3269 ret = -EINVAL;
3270 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3271 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3272 ti->error = "Invalid iv_offset sector";
3273 goto bad;
3274 }
3275 cc->iv_offset = tmpll;
3276
3277 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3278 if (ret) {
3279 ti->error = "Device lookup failed";
3280 goto bad;
3281 }
3282
3283 ret = -EINVAL;
3284 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
3285 ti->error = "Invalid device sector";
3286 goto bad;
3287 }
3288 cc->start = tmpll;
3289
3290 if (bdev_is_zoned(cc->dev->bdev)) {
/*
 * For zoned block devices, we need to preserve the issuer write
 * ordering. To do so, disable write workqueues and force inline
 * encryption completion.
 */
3296 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3297 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3298
/*
 * All zone append writes to a zone of a zoned block device will
 * have the same BIO sector, the start of the zone. When the
 * cipher IV mode uses sector values, all data targeting a
 * zone will be encrypted using the first sector numbers of the
 * zone. This will not result in write errors but will
 * cause most reads to fail as reads will use the sector values
 * for the actual data location, resulting in IV mismatch.
 * To avoid this problem, ask DM core to emulate zone append
 * operations by processing regular write operations.
 */
3310 DMDEBUG("Zone append operations will be emulated");
3311 ti->emulate_zone_append = true;
3312 }
3313
3314 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3315 ret = crypt_integrity_ctr(cc, ti);
3316 if (ret)
3317 goto bad;
3318
3319 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3320 if (!cc->tag_pool_max_sectors)
3321 cc->tag_pool_max_sectors = 1;
3322
3323 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3324 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3325 if (ret) {
3326 ti->error = "Cannot allocate integrity tags mempool";
3327 goto bad;
3328 }
3329
3330 cc->tag_pool_max_sectors <<= cc->sector_shift;
3331 }
3332
3333 ret = -ENOMEM;
3334 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3335 if (!cc->io_queue) {
3336 ti->error = "Couldn't create kcryptd io queue";
3337 goto bad;
3338 }
3339
3340 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3341 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3342 1, devname);
3343 else
3344 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3345 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3346 num_online_cpus(), devname);
3347 if (!cc->crypt_queue) {
3348 ti->error = "Couldn't create kcryptd queue";
3349 goto bad;
3350 }
3351
3352 spin_lock_init(&cc->write_thread_lock);
3353 cc->write_tree = RB_ROOT;
3354
3355 cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3356 if (IS_ERR(cc->write_thread)) {
3357 ret = PTR_ERR(cc->write_thread);
3358 cc->write_thread = NULL;
3359 ti->error = "Couldn't spawn write thread";
3360 goto bad;
3361 }
3362
3363 ti->num_flush_bios = 1;
3364 ti->limit_swap_bios = true;
3365 ti->accounts_remapped_io = true;
3366
3367 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
3368 return 0;
3369
3370 bad:
3371 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
3372 crypt_dtr(ti);
3373 return ret;
3374 }
3375
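/*
 * Per-bio entry point: flushes and discards are remapped directly to the
 * underlying device, everything else is handed to kcryptd for
 * encryption/decryption.
 */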
3376 static int crypt_map(struct dm_target *ti, struct bio *bio)
3377 {
3378 struct dm_crypt_io *io;
3379 struct crypt_config *cc = ti->private;
3380
/*
 * If the bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass the crypt queues.
 * - for REQ_PREFLUSH the device-mapper core ensures that no IO is in-flight
 * - for REQ_OP_DISCARD the caller must use a flush if IO ordering matters
 */
3386 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3387 bio_op(bio) == REQ_OP_DISCARD)) {
3388 bio_set_dev(bio, cc->dev->bdev);
3389 if (bio_sectors(bio))
3390 bio->bi_iter.bi_sector = cc->start +
3391 dm_target_offset(ti, bio->bi_iter.bi_sector);
3392 return DM_MAPIO_REMAPPED;
3393 }
3394
/*
 * Check if the bio is too large and split it as needed.
 */
3398 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3399 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3400 dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3401
/*
 * Ensure that the bio is a multiple of the internal sector encryption
 * size and is aligned to this size as defined in the IO hints.
 */
3406 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3407 return DM_MAPIO_KILL;
3408
3409 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3410 return DM_MAPIO_KILL;
3411
3412 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3413 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3414
3415 if (cc->on_disk_tag_size) {
3416 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3417
3418 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
3419 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
3420 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
3421 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3422 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3423 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3424 io->integrity_metadata_from_pool = true;
3425 }
3426 }
3427
3428 if (crypt_integrity_aead(cc))
3429 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3430 else
3431 io->ctx.r.req = (struct skcipher_request *)(io + 1);
3432
3433 if (bio_data_dir(io->base_bio) == READ) {
3434 if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
3435 kcryptd_queue_read(io);
3436 } else
3437 kcryptd_queue_crypt(io);
3438
3439 return DM_MAPIO_SUBMITTED;
3440 }
3441
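/*
 * Branchless nibble-to-ASCII conversion: for c <= 9 the mask below is 0
 * and the result is '0' + c; for c >= 10 the mask is 0x27, giving
 * 'a' + (c - 10).
 */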
3442 static char hex2asc(unsigned char c)
3443 {
3444 return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
3445 }
3446
3447 static void crypt_status(struct dm_target *ti, status_type_t type,
3448 unsigned status_flags, char *result, unsigned maxlen)
3449 {
3450 struct crypt_config *cc = ti->private;
3451 unsigned i, sz = 0;
3452 int num_feature_args = 0;
3453
3454 switch (type) {
3455 case STATUSTYPE_INFO:
3456 result[0] = '\0';
3457 break;
3458
3459 case STATUSTYPE_TABLE:
3460 DMEMIT("%s ", cc->cipher_string);
3461
3462 if (cc->key_size > 0) {
3463 if (cc->key_string)
3464 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3465 else {
3466 for (i = 0; i < cc->key_size; i++) {
3467 DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3468 hex2asc(cc->key[i] & 0xf));
3469 }
3470 }
3471 } else
3472 DMEMIT("-");
3473
3474 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3475 cc->dev->name, (unsigned long long)cc->start);
3476
3477 num_feature_args += !!ti->num_discard_bios;
3478 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3479 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3480 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3481 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3482 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3483 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3484 if (cc->on_disk_tag_size)
3485 num_feature_args++;
3486 if (num_feature_args) {
3487 DMEMIT(" %d", num_feature_args);
3488 if (ti->num_discard_bios)
3489 DMEMIT(" allow_discards");
3490 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3491 DMEMIT(" same_cpu_crypt");
3492 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3493 DMEMIT(" submit_from_crypt_cpus");
3494 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3495 DMEMIT(" no_read_workqueue");
3496 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3497 DMEMIT(" no_write_workqueue");
3498 if (cc->on_disk_tag_size)
3499 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3500 if (cc->sector_size != (1 << SECTOR_SHIFT))
3501 DMEMIT(" sector_size:%d", cc->sector_size);
3502 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3503 DMEMIT(" iv_large_sectors");
3504 }
3505 break;
3506
3507 case STATUSTYPE_IMA:
3508 DMEMIT_TARGET_NAME_VERSION(ti->type);
3509 DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
3510 DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
3511 DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
3512 'y' : 'n');
3513 DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
3514 'y' : 'n');
3515 DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
3516 'y' : 'n');
3517 DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
3518 'y' : 'n');
3519
3520 if (cc->on_disk_tag_size)
3521 DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
3522 cc->on_disk_tag_size, cc->cipher_auth);
3523 if (cc->sector_size != (1 << SECTOR_SHIFT))
3524 DMEMIT(",sector_size=%d", cc->sector_size);
3525 if (cc->cipher_string)
3526 DMEMIT(",cipher_string=%s", cc->cipher_string);
3527
3528 DMEMIT(",key_size=%u", cc->key_size);
3529 DMEMIT(",key_parts=%u", cc->key_parts);
3530 DMEMIT(",key_extra_size=%u", cc->key_extra_size);
3531 DMEMIT(",key_mac_size=%u", cc->key_mac_size);
3532 DMEMIT(";");
3533 break;
3534 }
3535 }
3536
3537 static void crypt_postsuspend(struct dm_target *ti)
3538 {
3539 struct crypt_config *cc = ti->private;
3540
3541 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3542 }
3543
3544 static int crypt_preresume(struct dm_target *ti)
3545 {
3546 struct crypt_config *cc = ti->private;
3547
3548 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3549 DMERR("aborting resume - crypt key is not set.");
3550 return -EAGAIN;
3551 }
3552
3553 return 0;
3554 }
3555
3556 static void crypt_resume(struct dm_target *ti)
3557 {
3558 struct crypt_config *cc = ti->private;
3559
3560 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3561 }
3562
/* Message interface
 *	key set <key>
 *	key wipe
 */
3567 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
3568 char *result, unsigned maxlen)
3569 {
3570 struct crypt_config *cc = ti->private;
3571 int key_size, ret = -EINVAL;
3572
3573 if (argc < 2)
3574 goto error;
3575
3576 if (!strcasecmp(argv[0], "key")) {
3577 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3578 DMWARN("not suspended during key manipulation.");
3579 return -EINVAL;
3580 }
3581 if (argc == 3 && !strcasecmp(argv[1], "set")) {
/* The key size may not be changed. */
3583 key_size = get_key_size(&argv[2]);
3584 if (key_size < 0 || cc->key_size != key_size) {
3585 memset(argv[2], '0', strlen(argv[2]));
3586 return -EINVAL;
3587 }
3588
3589 ret = crypt_set_key(cc, argv[2]);
3590 if (ret)
3591 return ret;
3592 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3593 ret = cc->iv_gen_ops->init(cc);
/* wipe the kernel key payload copy */
3595 if (cc->key_string)
3596 memset(cc->key, 0, cc->key_size * sizeof(u8));
3597 return ret;
3598 }
3599 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
3600 return crypt_wipe_key(cc);
3601 }
3602
3603 error:
3604 DMWARN("unrecognised message received.");
3605 return -EINVAL;
3606 }
3607
3608 static int crypt_iterate_devices(struct dm_target *ti,
3609 iterate_devices_callout_fn fn, void *data)
3610 {
3611 struct crypt_config *cc = ti->private;
3612
3613 return fn(ti, cc->dev, cc->start, ti->len, data);
3614 }
3615
3616 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3617 {
3618 struct crypt_config *cc = ti->private;
3619
/*
 * Unfortunate constraint that is required to avoid the potential
 * for exceeding underlying device's max_segments limits -- due to
 * crypt_alloc_buffer() possibly allocating pages for the encryption
 * bio that are not as physically contiguous as the original bio.
 */
3626 limits->max_segment_size = PAGE_SIZE;
3627
3628 limits->logical_block_size =
3629 max_t(unsigned, limits->logical_block_size, cc->sector_size);
3630 limits->physical_block_size =
3631 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3632 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
3633 }
3634
3635 static struct target_type crypt_target = {
3636 .name = "crypt",
3637 .version = {1, 24, 0},
3638 .module = THIS_MODULE,
3639 .ctr = crypt_ctr,
3640 .dtr = crypt_dtr,
3641 .features = DM_TARGET_ZONED_HM,
3642 .report_zones = crypt_report_zones,
3643 .map = crypt_map,
3644 .status = crypt_status,
3645 .postsuspend = crypt_postsuspend,
3646 .preresume = crypt_preresume,
3647 .resume = crypt_resume,
3648 .message = crypt_message,
3649 .iterate_devices = crypt_iterate_devices,
3650 .io_hints = crypt_io_hints,
3651 };
3652
3653 static int __init dm_crypt_init(void)
3654 {
3655 int r;
3656
3657 r = dm_register_target(&crypt_target);
3658 if (r < 0)
3659 DMERR("register failed %d", r);
3660
3661 return r;
3662 }
3663
3664 static void __exit dm_crypt_exit(void)
3665 {
3666 dm_unregister_target(&crypt_target);
3667 }
3668
3669 module_init(dm_crypt_init);
3670 module_exit(dm_crypt_exit);
3671
3672 MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3673 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3674 MODULE_LICENSE("GPL");