// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

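/*
 * Supported encryption modes. .keysize is the raw key length in bytes
 * (AES-256-XTS takes 64 bytes because XTS uses two 256-bit keys), and
 * .ivsize is the IV length in bytes, which also bounds the dun_bytes
 * accepted by blk_crypto_init_key().
 */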
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
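/*
 * For example (illustrative figures only): 32 threads doing I/O on bios
 * nested at most 4 deep would need 32 * 4 = 128 contexts.
 */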
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

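/*
 * bio_crypt_set_ctx() - attach an encryption context to a bio
 * @bio: bio to attach the context to
 * @key: the blk_crypto_key to encrypt/decrypt @bio with
 * @dun: the data unit number for the first data unit of @bio
 * @gfp_mask: memory allocation flags; must include __GFP_DIRECT_RECLAIM
 */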
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

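/*
 * Give @dst, which is a clone of @src, its own copy of @src's encryption
 * context, since the two bios may be advanced and freed independently.
 */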
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
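/*
 * For example, incrementing { U64_MAX, 0, ... } by 1 overflows the first
 * limb and carries into the second, giving { 0, 1, ... }.
 */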
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
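/*
 * E.g. with 4096-byte data units, a bio covering 8 data units (32768 bytes)
 * starting at DUN 100 is contiguous with a following bio whose DUN is 108.
 */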
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are contiguous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
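/*
 * Encryption is done in data-unit-sized blocks, each with its own IV derived
 * from the DUN, so every segment must begin and end on a data unit boundary.
 */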
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

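/*
 * Get a hardware keyslot for the request's key from the device's crypto
 * profile, programming the key into the hardware if necessary.
 */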
blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	struct blk_crypto_profile *profile;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
	if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

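/*
 * Copy the bio's encryption context into its request, allocating
 * rq->crypt_ctx if it hasn't been allocated yet.
 */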
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
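
/*
 * Example (illustrative only, not part of this file): a filesystem that wants
 * inline encryption with AES-256-XTS and 4096-byte data units might do
 * roughly the following. The identifiers fs_key, fs_raw_key and first_lblk
 * are hypothetical.
 *
 *	struct blk_crypto_key fs_key;
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_lblk };
 *	int err;
 *
 *	err = blk_crypto_init_key(&fs_key, fs_raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  sizeof(u64), 4096);
 *	if (!err)
 *		err = blk_crypto_start_using_key(&fs_key, q);
 *	...
 *	bio_crypt_set_ctx(bio, &fs_key, dun, GFP_NOFS);
 *	submit_bio(bio);
 */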

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has
 * transforms for the needed mode allocated and ready to go. This function may
 * allocate an skcipher, and *should not* be called from the data path, since
 * that might cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return __blk_crypto_evict_key(q->crypto_profile, key);

	/*
	 * If the request_queue didn't support the key, then blk-crypto-fallback
	 * may have been used, so try to evict the key from blk-crypto-fallback.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);