0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Adiantum length-preserving encryption mode
0004  *
0005  * Copyright 2018 Google LLC
0006  */
0007 
0008 /*
0009  * Adiantum is a tweakable, length-preserving encryption mode designed for fast
0010  * and secure disk encryption, especially on CPUs without dedicated crypto
0011  * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
0012  * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
0013  * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
0014  * 16-byte block.  See the paper for details:
0015  *
0016  *  Adiantum: length-preserving encryption for entry-level processors
0017  *      (https://eprint.iacr.org/2018/720.pdf)
0018  *
0019  * For flexibility, this implementation also allows other ciphers:
0020  *
0021  *  - Stream cipher: XChaCha12 or XChaCha20
0022  *  - Block cipher: any with a 128-bit block size and 256-bit key
0023  *
0024  * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
0025  * HPolyC is not supported.  This is because Adiantum is ~20% faster than HPolyC
0026  * but still provably as secure, and also the ε-∆U hash function of HBSH is
0027  * formally defined to take two inputs (tweak, message) which makes it difficult
0028  * to wrap with the crypto_shash API.  Rather, some details need to be handled
0029  * here.  Nevertheless, if needed in the future, support for other ε-∆U hash
0030  * functions could be added here.
0031  */
0032 
0033 #include <crypto/b128ops.h>
0034 #include <crypto/chacha.h>
0035 #include <crypto/internal/cipher.h>
0036 #include <crypto/internal/hash.h>
0037 #include <crypto/internal/poly1305.h>
0038 #include <crypto/internal/skcipher.h>
0039 #include <crypto/nhpoly1305.h>
0040 #include <crypto/scatterwalk.h>
0041 #include <linux/module.h>
0042 
0043 /*
0044  * Size of right-hand part of input data, in bytes; also the size of the block
0045  * cipher's block size and the hash function's output.
0046  */
0047 #define BLOCKCIPHER_BLOCK_SIZE      16
0048 
0049 /* Size of the block cipher key (K_E) in bytes */
0050 #define BLOCKCIPHER_KEY_SIZE        32
0051 
0052 /* Size of the hash key (K_H) in bytes */
0053 #define HASH_KEY_SIZE       (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
0054 
0055 /*
0056  * The specification allows variable-length tweaks, but Linux's crypto API
0057  * currently only allows algorithms to support a single length.  The "natural"
0058  * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
0059  * the best performance.  But longer tweaks are useful for fscrypt, to avoid
0060  * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
0061  */
0062 #define TWEAK_SIZE      32
0063 
/*
 * Per-instance context: holds references (spawns) to the three inner
 * algorithms that an Adiantum instance is composed from.
 */
struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn; /* e.g. "xchacha12" */
	struct crypto_cipher_spawn blockcipher_spawn;	 /* e.g. "aes" */
	struct crypto_shash_spawn hash_spawn;		 /* "nhpoly1305" */
};
0069 
/*
 * Per-transform context: the instantiated inner algorithms, plus the
 * Poly1305 key used by adiantum_hash_header().  All keys are derived
 * from the user-provided key in adiantum_setkey().
 */
struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;	/* XChaCha12 or XChaCha20 */
	struct crypto_cipher *blockcipher;	/* 128-bit block, 256-bit key */
	struct crypto_shash *hash;		/* NHPoly1305 */
	struct poly1305_core_key header_hash_key; /* K_T, set in ->setkey() */
};
0076 
/* Per-request state, allocated in the skcipher request context area */
struct adiantum_request_ctx {

	/*
	 * Buffer for right-hand part of data, i.e.
	 *
	 *    P_R => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_R when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak); computed once and reused in both hash steps.
	 */
	le128 header_hash;

	/* Sub-requests, must be last (variable-size trailing storage) */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};
0107 
0108 /*
0109  * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
0110  * hash key K_H as follows:
0111  *
0112  *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
0113  *
0114  * Note that this denotes using bits from the XChaCha keystream, which here we
0115  * get indirectly by encrypting a buffer containing all 0's.
0116  */
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S), forwarding the request flags */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/*
	 * Derive the subkeys by encrypting an all-zeroes buffer (data is
	 * kzalloc'ed) with nonce 1||0^191, yielding the XChaCha keystream.
	 */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	/* Wait synchronously in case the stream cipher is asynchronous */
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;
	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H): first the Poly1305 part (K_T)... */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	/* ...then the NHPoly1305 part */
	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	/* Sanity check: exactly all derived key material was consumed */
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
	kfree_sensitive(data);	/* zeroizes the derived key material */
	return err;
}
0184 
0185 /* Addition in Z/(2^{128}Z) */
0186 static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
0187 {
0188     u64 x = le64_to_cpu(v1->b);
0189     u64 y = le64_to_cpu(v2->b);
0190 
0191     r->b = cpu_to_le64(x + y);
0192     r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
0193                (x + y < x));
0194 }
0195 
0196 /* Subtraction in Z/(2^{128}Z) */
0197 static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
0198 {
0199     u64 x = le64_to_cpu(v1->b);
0200     u64 y = le64_to_cpu(v2->b);
0201 
0202     r->b = cpu_to_le64(x - y);
0203     r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
0204                (x - y > x));
0205 }
0206 
0207 /*
0208  * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
0209  * result to rctx->header_hash.  This is the calculation
0210  *
0211  *  H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
0212  *
0213  * from the procedure in section 6.4 of the Adiantum paper.  The resulting value
0214  * is reused in both the first and second hash steps.  Specifically, it's added
0215  * to the result of an independently keyed ε-∆U hash function (for equal length
0216  * inputs only) taken over the left-hand part (the "bulk") of the message, to
0217  * give the overall Adiantum hash of the (tweak, left-hand part) pair.
0218  */
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;	/* low half of bin_{128}(|L|), in bits */
		__le64 padding;		/* high half, always zero */
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	/* Hash the 128-bit encoding of the bulk length... */
	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	/* ...followed by the tweak, which is passed in via req->iv */
	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
0245 
0246 /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
0247 static int adiantum_hash_message(struct skcipher_request *req,
0248                  struct scatterlist *sgl, le128 *digest)
0249 {
0250     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
0251     const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
0252     struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
0253     const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
0254     struct shash_desc *hash_desc = &rctx->u.hash_desc;
0255     struct sg_mapping_iter miter;
0256     unsigned int i, n;
0257     int err;
0258 
0259     hash_desc->tfm = tctx->hash;
0260 
0261     err = crypto_shash_init(hash_desc);
0262     if (err)
0263         return err;
0264 
0265     sg_miter_start(&miter, sgl, sg_nents(sgl),
0266                SG_MITER_FROM_SG | SG_MITER_ATOMIC);
0267     for (i = 0; i < bulk_len; i += n) {
0268         sg_miter_next(&miter);
0269         n = min_t(unsigned int, miter.length, bulk_len - i);
0270         err = crypto_shash_update(hash_desc, miter.addr, n);
0271         if (err)
0272             break;
0273     }
0274     sg_miter_stop(&miter);
0275     if (err)
0276         return err;
0277 
0278     return crypto_shash_final(hash_desc, (u8 *)digest);
0279 }
0280 
0281 /* Continue Adiantum encryption/decryption after the stream cipher step */
0282 static int adiantum_finish(struct skcipher_request *req)
0283 {
0284     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
0285     const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
0286     struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
0287     const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
0288     le128 digest;
0289     int err;
0290 
0291     /* If decrypting, decrypt C_M with the block cipher to get P_M */
0292     if (!rctx->enc)
0293         crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
0294                       rctx->rbuf.bytes);
0295 
0296     /*
0297      * Second hash step
0298      *  enc: C_R = C_M - H_{K_H}(T, C_L)
0299      *  dec: P_R = P_M - H_{K_H}(T, P_L)
0300      */
0301     err = adiantum_hash_message(req, req->dst, &digest);
0302     if (err)
0303         return err;
0304     le128_add(&digest, &digest, &rctx->header_hash);
0305     le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
0306     scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
0307                  bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
0308     return 0;
0309 }
0310 
0311 static void adiantum_streamcipher_done(struct crypto_async_request *areq,
0312                        int err)
0313 {
0314     struct skcipher_request *req = areq->data;
0315 
0316     if (!err)
0317         err = adiantum_finish(req);
0318 
0319     skcipher_request_complete(req, err);
0320 }
0321 
/*
 * Encrypt or decrypt one message.  Performs the first hash step and the
 * block cipher (when encrypting) inline, then hands off to the stream
 * cipher; the second hash step runs in adiantum_finish(), either directly
 * or from the async completion callback.
 */
static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	unsigned int stream_len;
	le128 digest;
	int err;

	/* The message must contain at least the 16-byte right-hand block */
	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	err = adiantum_hash_message(req, req->src, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	/* Load the right-hand block from the source, then add the hash */
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
	 * implementations tend to be most efficient when passed a whole number
	 * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
	 * And here it doesn't matter whether the last 16 bytes are written to,
	 * as the second hash step will overwrite them.  Thus, round the XChaCha
	 * length up to the next 64-byte boundary if possible.
	 */
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	/*
	 * Stream cipher step.  The encrypt direction is used for both
	 * encryption and decryption, since a stream cipher's two directions
	 * are the same keystream XOR.  If the inner request completes
	 * asynchronously (nonzero return), adiantum_streamcipher_done()
	 * calls adiantum_finish() instead.
	 */
	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
		adiantum_finish(req);
}
0386 
/* Encrypt one message; the 32-byte tweak is passed via req->iv */
static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}
0391 
/* Decrypt one message; the 32-byte tweak is passed via req->iv */
static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}
0396 
0397 static int adiantum_init_tfm(struct crypto_skcipher *tfm)
0398 {
0399     struct skcipher_instance *inst = skcipher_alg_instance(tfm);
0400     struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
0401     struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
0402     struct crypto_skcipher *streamcipher;
0403     struct crypto_cipher *blockcipher;
0404     struct crypto_shash *hash;
0405     unsigned int subreq_size;
0406     int err;
0407 
0408     streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
0409     if (IS_ERR(streamcipher))
0410         return PTR_ERR(streamcipher);
0411 
0412     blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
0413     if (IS_ERR(blockcipher)) {
0414         err = PTR_ERR(blockcipher);
0415         goto err_free_streamcipher;
0416     }
0417 
0418     hash = crypto_spawn_shash(&ictx->hash_spawn);
0419     if (IS_ERR(hash)) {
0420         err = PTR_ERR(hash);
0421         goto err_free_blockcipher;
0422     }
0423 
0424     tctx->streamcipher = streamcipher;
0425     tctx->blockcipher = blockcipher;
0426     tctx->hash = hash;
0427 
0428     BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
0429              sizeof(struct adiantum_request_ctx));
0430     subreq_size = max(sizeof_field(struct adiantum_request_ctx,
0431                        u.hash_desc) +
0432               crypto_shash_descsize(hash),
0433               sizeof_field(struct adiantum_request_ctx,
0434                        u.streamcipher_req) +
0435               crypto_skcipher_reqsize(streamcipher));
0436 
0437     crypto_skcipher_set_reqsize(tfm,
0438                     offsetof(struct adiantum_request_ctx, u) +
0439                     subreq_size);
0440     return 0;
0441 
0442 err_free_blockcipher:
0443     crypto_free_cipher(blockcipher);
0444 err_free_streamcipher:
0445     crypto_free_skcipher(streamcipher);
0446     return err;
0447 }
0448 
0449 static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
0450 {
0451     struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
0452 
0453     crypto_free_skcipher(tctx->streamcipher);
0454     crypto_free_cipher(tctx->blockcipher);
0455     crypto_free_shash(tctx->hash);
0456 }
0457 
0458 static void adiantum_free_instance(struct skcipher_instance *inst)
0459 {
0460     struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
0461 
0462     crypto_drop_skcipher(&ictx->streamcipher_spawn);
0463     crypto_drop_cipher(&ictx->blockcipher_spawn);
0464     crypto_drop_shash(&ictx->hash_spawn);
0465     kfree(inst);
0466 }
0467 
0468 /*
0469  * Check for a supported set of inner algorithms.
0470  * See the comment at the beginning of this file.
0471  */
0472 static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
0473                       struct crypto_alg *blockcipher_alg,
0474                       struct shash_alg *hash_alg)
0475 {
0476     if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
0477         strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
0478         return false;
0479 
0480     if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
0481         blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
0482         return false;
0483     if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
0484         return false;
0485 
0486     if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
0487         return false;
0488 
0489     return true;
0490 }
0491 
/*
 * Instantiate "adiantum(streamcipher,blockcipher[,hash])" from the
 * template arguments in @tb.  The third argument is optional and
 * defaults to "nhpoly1305".  On any failure after allocation, the
 * err_free_inst label (inside the final if) drops the spawns and frees
 * the instance.
 */
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function (optional third template argument) */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	/* cra_name includes only the stream and block ciphers */
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
				       hash_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.  Thus,
	 * weigh the block cipher's ->cra_priority less.
	 */
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
	inst->alg.ivsize = TWEAK_SIZE;	/* the "IV" carries the tweak */

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}
0595 
/*
 * adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name])
 *
 * The third template argument is optional; adiantum_create() substitutes
 * "nhpoly1305" when it is absent.
 */
static struct crypto_template adiantum_tmpl = {
	.name = "adiantum",
	.create = adiantum_create,
	.module = THIS_MODULE,
};
0602 
/* Register the "adiantum" template with the crypto API */
static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}
0607 
/* Unregister the "adiantum" template on module unload */
static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}
0612 
/*
 * NOTE(review): registered via subsys_initcall rather than module_init —
 * presumably so the template is available early when built in; confirm
 * against users such as dm-crypt/fscrypt.
 */
subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);