// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API.  See Documentation/block/inline-encryption.rst.  fscrypt
 * still provides the key and IV to use.
 */
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[]; /* num_devs request_queue pointers */
};

static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}
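
/*
 * Note: the sizes above work out to 24 bytes for DIRECT_KEY (the 8-byte
 * logical block number followed by the 16-byte per-file nonce), 8 bytes for
 * IV_INO_LBLK_64, 4 bytes for IV_INO_LBLK_32, and otherwise just enough
 * bytes to hold any logical block number the filesystem can use.
 */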

/*
 * Log a message when starting to use blk-crypto (native) or
 * blk-crypto-fallback for an encryption mode for the first time.  This is
 * analogous to the message logged when starting to use the crypto API for the
 * first time.  A limitation is that these messages don't convey which
 * specific filesystems or files are using each implementation.  However,
 * *usually* systems use just one implementation per mode, which makes these
 * messages helpful for debugging problems where the "wrong" implementation
 * is used.
 */
static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
					struct request_queue **devs,
					int num_devs,
					const struct blk_crypto_config *cfg)
{
	int i;

	for (i = 0; i < num_devs; i++) {
		if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
		    __blk_crypto_cfg_supported(devs[i]->crypto_profile, cfg)) {
			if (!xchg(&mode->logged_blk_crypto_native, 1))
				pr_info("fscrypt: %s using blk-crypto (native)\n",
					mode->friendly_name);
		} else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
			pr_info("fscrypt: %s using blk-crypto-fallback\n",
				mode->friendly_name);
		}
	}
}

int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem
	 * blocks, some filesystem code only calls fscrypt_mergeable_bio() for
	 * the first block in the page.  This is fine for most of fscrypt's IV
	 * generation strategies, where contiguous blocks imply contiguous
	 * IVs.  But it doesn't work with IV_INO_LBLK_32.  For now, simply
	 * exclude IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline
	 * encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem was already unmounted
	 * (e.g., per-mode keys in ext4).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}

	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is
	 * only possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}

void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
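
/*
 * Note: filesystems normally call the fscrypt_inode_uses_inline_crypto()
 * wrapper from <linux/fscrypt.h>, which checks
 * fscrypt_needs_contents_encryption() first so that ->i_crypt_info is only
 * dereferenced for inodes that actually use contents encryption.
 *
 * Illustrative sketch (hypothetical helper, not a real fscrypt API): how a
 * filesystem's write path might branch on the crypto strategy.  With inline
 * crypto the pagecache page is submitted as-is and blk-crypto encrypts it
 * during I/O; with fs-layer crypto the data is first encrypted into a bounce
 * page.
 */
#if 0
static struct page *hypothetical_prepare_data_page(struct page *page,
						   unsigned int len,
						   unsigned int offs)
{
	struct inode *inode = page->mapping->host;

	if (fscrypt_inode_uses_inline_crypto(inode))
		return page;	/* blk-crypto encrypts within the bio */

	/* fs-layer crypto: encrypt the blocks into a bounce page */
	return fscrypt_encrypt_pagecache_blocks(page, len, offs, GFP_NOFS);
}
#endif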

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}
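
/*
 * For example (informational): AES-256-XTS has an ->ivsize of 16 bytes, so
 * the loop above copies iv.dun[0] (the 64-bit logical block number, converted
 * to CPU byte order) and iv.dun[1] into the DUN array; the remaining DUN
 * words stay zeroed by the memset().
 */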

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the bio
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
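
/*
 * Illustrative usage (hypothetical helper, not a real fscrypt API), assuming
 * the pre-5.18 two-argument bio_alloc() that matches this file's era:
 * allocate a bio for file contents I/O and attach the encryption context
 * before any pages are added.
 */
#if 0
static struct bio *hypothetical_alloc_file_bio(const struct inode *inode,
					       u64 first_lblk)
{
	struct bio *bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);

	/* GFP_NOFS is a waiting mask, so bio_crypt_set_ctx() cannot fail. */
	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
	return bio;
}
#endif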

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly
	 * created for a non-pagecache page.  fscrypt doesn't care about
	 * these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head
 * instead of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this
 * function to ensure that the resulting bio contains only contiguous data
 * unit numbers.  This will return false if the next part of the I/O cannot
 * be merged with the bio because either the encryption key would be
 * different or the encryption data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the bc_key pointers is good enough, as all I/O for each
	 * key uses the same pointer.  I.e., there's currently no need to
	 * support merging requests where the keys are the same but the
	 * pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
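
/*
 * Illustrative usage (hypothetical helpers, not real fscrypt APIs): while
 * growing a bio one block at a time, submit the current bio and start a new
 * one whenever the next block isn't crypto-mergeable.  This reuses the
 * hypothetical_alloc_file_bio() sketch from above.
 */
#if 0
static void hypothetical_add_block(struct bio **bio_p,
				   const struct inode *inode,
				   struct page *page, u64 lblk)
{
	struct bio *bio = *bio_p;

	/* Start a new bio if this block can't be crypto-merged into it. */
	if (bio->bi_iter.bi_size != 0 &&
	    !fscrypt_mergeable_bio(bio, inode, lblk)) {
		submit_bio(bio);
		bio = hypothetical_alloc_file_bio(inode, lblk);
		*bio_p = bio;
	}
	bio_add_page(bio, page, i_blocksize(inode), 0);
}
#endif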

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead
 * of an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether a DIO (direct I/O) request is
 *			     supported as far as encryption is concerned
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *	   being supported; %false if DIO is unsupported.  (Note that in the
 *	   %true case, the filesystem might have other, non-encryption-related
 *	   constraints that prevent DIO from actually being supported.)
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
	const struct inode *inode = file_inode(iocb->ki_filp);
	const unsigned int blocksize = i_blocksize(inode);

	/* If the file is unencrypted, no veto from us. */
	if (!fscrypt_needs_contents_encryption(inode))
		return true;

	/* We only support DIO with inline crypto, not fs-layer crypto. */
	if (!fscrypt_inode_uses_inline_crypto(inode))
		return false;

	/*
	 * Since the granularity of encryption is one filesystem block, the
	 * file position and total I/O length must be aligned to the
	 * filesystem block size -- not just to the block device's logical
	 * block size, as is traditionally sufficient for DIO.  The
	 * user-provided memory buffers must be filesystem block aligned too,
	 * so that the single IS_ALIGNED() check below (which folds in
	 * iov_iter_alignment()) covers every alignment property of the
	 * request at once.
	 */
	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
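
/*
 * Illustrative usage (hypothetical helpers, not real fscrypt or VFS APIs): a
 * filesystem's ->read_iter() would typically fall back to buffered I/O when
 * encryption rules out DIO, rather than failing the request.
 */
#if 0
static ssize_t hypothetical_file_read_iter(struct kiocb *iocb,
					   struct iov_iter *to)
{
	if ((iocb->ki_flags & IOCB_DIRECT) &&
	    !fscrypt_dio_supported(iocb, to))
		return hypothetical_buffered_read_iter(iocb, to);

	return hypothetical_generic_read_iter(iocb, to);
}
#endif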

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks.  (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur.  If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
	const struct fscrypt_info *ci;
	u32 dun;

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return nr_blocks;

	if (nr_blocks <= 1)
		return nr_blocks;

	ci = inode->i_crypt_info;
	if (!(fscrypt_policy_flags(&ci->ci_policy) &
	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
		return nr_blocks;

	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
	dun = ci->ci_hashed_ino + lblk;

	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
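
/*
 * Illustrative usage (hypothetical mapping structure, not a real fscrypt
 * API): a filesystem's block-mapping path clamps each extent so that an
 * IV_INO_LBLK_32 DUN wraparound never lands in the middle of a bio.
 */
#if 0
	/* map->m_lblk and map->m_len describe a hypothetical extent */
	map->m_len = fscrypt_limit_io_blocks(inode, map->m_lblk, map->m_len);
#endif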