Back to home page

LXR

 
 

    


0001 /*
0002  * Key Wrapping: RFC3394 / NIST SP800-38F
0003  *
0004  * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
0005  *
0006  * Redistribution and use in source and binary forms, with or without
0007  * modification, are permitted provided that the following conditions
0008  * are met:
0009  * 1. Redistributions of source code must retain the above copyright
0010  *    notice, and the entire permission notice in its entirety,
0011  *    including the disclaimer of warranties.
0012  * 2. Redistributions in binary form must reproduce the above copyright
0013  *    notice, this list of conditions and the following disclaimer in the
0014  *    documentation and/or other materials provided with the distribution.
0015  * 3. The name of the author may not be used to endorse or promote
0016  *    products derived from this software without specific prior
0017  *    written permission.
0018  *
0019  * ALTERNATIVELY, this product may be distributed under the terms of
0020  * the GNU General Public License, in which case the provisions of the GPL2
0021  * are required INSTEAD OF the above restrictions.  (This clause is
0022  * necessary due to a potential bad interaction between the GPL and
0023  * the restrictions contained in a BSD-style copyright.)
0024  *
0025  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
0026  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
0027  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
0028  * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
0029  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0030  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
0031  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
0032  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
0033  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
0034  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0035  * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
0036  * DAMAGE.
0037  */
0038 
0039 /*
0040  * Note for using key wrapping:
0041  *
0042  *  * The result of the encryption operation is the ciphertext starting
0043  *    with the 2nd semiblock. The first semiblock is provided as the IV.
0044  *    The IV used to start the encryption operation is the default IV.
0045  *
0046  *  * The input for the decryption is the first semiblock handed in as an
0047  *    IV. The ciphertext is the data starting with the 2nd semiblock. The
0048  *    return code of the decryption operation will be EBADMSG in case an
0049  *    integrity error occurs.
0050  *
0051  * To obtain the full result of an encryption as expected by SP800-38F, the
0052  * caller must allocate a buffer of plaintext + 8 bytes:
0053  *
0054  *  unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
0055  *  u8 data[datalen];
0056  *  u8 *iv = data;
0057  *  u8 *pt = data + crypto_skcipher_ivsize(tfm);
0058  *      <ensure that pt contains the plaintext of size ptlen>
 *  sg_init_one(&sg, pt, ptlen);
0060  *  skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
0061  *
0062  *  ==> After encryption, data now contains full KW result as per SP800-38F.
0063  *
0064  * In case of decryption, ciphertext now already has the expected length
0065  * and must be segmented appropriately:
0066  *
0067  *  unsigned int datalen = CTLEN;
0068  *  u8 data[datalen];
0069  *      <ensure that data contains full ciphertext>
0070  *  u8 *iv = data;
0071  *  u8 *ct = data + crypto_skcipher_ivsize(tfm);
0072  *  unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
 *  sg_init_one(&sg, ct, ctlen);
 *  skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
0075  *
0076  *  ==> After decryption (which hopefully does not return EBADMSG), the ct
0077  *  pointer now points to the plaintext of size ctlen.
0078  *
0079  * Note 2: KWP is not implemented as this would defy in-place operation.
0080  *     If somebody wants to wrap non-aligned data, he should simply pad
0081  *     the input with zeros to fill it up to the 8 byte boundary.
0082  */
0083 
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
0089 
/* Per-tfm context: the underlying raw block cipher doing the wrapping. */
struct crypto_kw_ctx {
	struct crypto_cipher *child;
};
0093 
/*
 * One KW working block per SP800-38F: the 64-bit integrity/chaining value A
 * followed by the 64-bit data register R. The members are adjacent so the
 * pair can be handed to the block cipher as a single 16-byte block
 * (see the (u8 *)block casts in the encrypt/decrypt loops).
 */
struct crypto_kw_block {
#define SEMIBSIZE 8
	u8 A[SEMIBSIZE];
	u8 R[SEMIBSIZE];
};
0099 
0100 /* convert 64 bit integer into its string representation */
0101 static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
0102 {
0103     __be64 *a = (__be64 *)buf;
0104 
0105     *a = cpu_to_be64(val);
0106 }
0107 
/*
 * Fast forward the SGL to the "end" length minus SEMIBSIZE.
 * The start in the SGL defined by the fast-forward is returned with
 * the walk variable.
 *
 * I.e. position @walk on the last full semiblock within the first @end
 * bytes of @sg — the KW decrypt pass consumes semiblocks back to front.
 */
static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
				     struct scatterlist *sg,
				     unsigned int end)
{
	unsigned int skip = 0;

	/* The caller should only operate on full SEMIBLOCKs. */
	BUG_ON(end < SEMIBSIZE);

	/* byte offset of the target semiblock from the start of the SGL */
	skip = end - SEMIBSIZE;
	while (sg) {
		if (sg->length > skip) {
			/* target lies within this entry: start walking here */
			scatterwalk_start(walk, sg);
			scatterwalk_advance(walk, skip);
			break;
		} else
			/* whole entry precedes the target; keep skipping */
			skip -= sg->length;

		sg = sg_next(sg);
	}
}
0134 
0135 static int crypto_kw_decrypt(struct blkcipher_desc *desc,
0136                  struct scatterlist *dst, struct scatterlist *src,
0137                  unsigned int nbytes)
0138 {
0139     struct crypto_blkcipher *tfm = desc->tfm;
0140     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
0141     struct crypto_cipher *child = ctx->child;
0142 
0143     unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
0144                     crypto_cipher_alignmask(child));
0145     unsigned int i;
0146 
0147     u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
0148     struct crypto_kw_block *block = (struct crypto_kw_block *)
0149                     PTR_ALIGN(blockbuf + 0, alignmask + 1);
0150 
0151     u64 t = 6 * ((nbytes) >> 3);
0152     struct scatterlist *lsrc, *ldst;
0153     int ret = 0;
0154 
0155     /*
0156      * Require at least 2 semiblocks (note, the 3rd semiblock that is
0157      * required by SP800-38F is the IV.
0158      */
0159     if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
0160         return -EINVAL;
0161 
0162     /* Place the IV into block A */
0163     memcpy(block->A, desc->info, SEMIBSIZE);
0164 
0165     /*
0166      * src scatterlist is read-only. dst scatterlist is r/w. During the
0167      * first loop, lsrc points to src and ldst to dst. For any
0168      * subsequent round, the code operates on dst only.
0169      */
0170     lsrc = src;
0171     ldst = dst;
0172 
0173     for (i = 0; i < 6; i++) {
0174         u8 tbe_buffer[SEMIBSIZE + alignmask];
0175         /* alignment for the crypto_xor and the _to_be64 operation */
0176         u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
0177         unsigned int tmp_nbytes = nbytes;
0178         struct scatter_walk src_walk, dst_walk;
0179 
0180         while (tmp_nbytes) {
0181             /* move pointer by tmp_nbytes in the SGL */
0182             crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
0183             /* get the source block */
0184             scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
0185                            false);
0186 
0187             /* perform KW operation: get counter as byte string */
0188             crypto_kw_cpu_to_be64(t, tbe);
0189             /* perform KW operation: modify IV with counter */
0190             crypto_xor(block->A, tbe, SEMIBSIZE);
0191             t--;
0192             /* perform KW operation: decrypt block */
0193             crypto_cipher_decrypt_one(child, (u8*)block,
0194                           (u8*)block);
0195 
0196             /* move pointer by tmp_nbytes in the SGL */
0197             crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
0198             /* Copy block->R into place */
0199             scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
0200                            true);
0201 
0202             tmp_nbytes -= SEMIBSIZE;
0203         }
0204 
0205         /* we now start to operate on the dst SGL only */
0206         lsrc = dst;
0207         ldst = dst;
0208     }
0209 
0210     /* Perform authentication check */
0211     if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
0212               SEMIBSIZE))
0213         ret = -EBADMSG;
0214 
0215     memzero_explicit(block, sizeof(struct crypto_kw_block));
0216 
0217     return ret;
0218 }
0219 
0220 static int crypto_kw_encrypt(struct blkcipher_desc *desc,
0221                  struct scatterlist *dst, struct scatterlist *src,
0222                  unsigned int nbytes)
0223 {
0224     struct crypto_blkcipher *tfm = desc->tfm;
0225     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
0226     struct crypto_cipher *child = ctx->child;
0227 
0228     unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
0229                     crypto_cipher_alignmask(child));
0230     unsigned int i;
0231 
0232     u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
0233     struct crypto_kw_block *block = (struct crypto_kw_block *)
0234                     PTR_ALIGN(blockbuf + 0, alignmask + 1);
0235 
0236     u64 t = 1;
0237     struct scatterlist *lsrc, *ldst;
0238 
0239     /*
0240      * Require at least 2 semiblocks (note, the 3rd semiblock that is
0241      * required by SP800-38F is the IV that occupies the first semiblock.
0242      * This means that the dst memory must be one semiblock larger than src.
0243      * Also ensure that the given data is aligned to semiblock.
0244      */
0245     if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
0246         return -EINVAL;
0247 
0248     /*
0249      * Place the predefined IV into block A -- for encrypt, the caller
0250      * does not need to provide an IV, but he needs to fetch the final IV.
0251      */
0252     memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
0253 
0254     /*
0255      * src scatterlist is read-only. dst scatterlist is r/w. During the
0256      * first loop, lsrc points to src and ldst to dst. For any
0257      * subsequent round, the code operates on dst only.
0258      */
0259     lsrc = src;
0260     ldst = dst;
0261 
0262     for (i = 0; i < 6; i++) {
0263         u8 tbe_buffer[SEMIBSIZE + alignmask];
0264         u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
0265         unsigned int tmp_nbytes = nbytes;
0266         struct scatter_walk src_walk, dst_walk;
0267 
0268         scatterwalk_start(&src_walk, lsrc);
0269         scatterwalk_start(&dst_walk, ldst);
0270 
0271         while (tmp_nbytes) {
0272             /* get the source block */
0273             scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
0274                            false);
0275 
0276             /* perform KW operation: encrypt block */
0277             crypto_cipher_encrypt_one(child, (u8 *)block,
0278                           (u8 *)block);
0279             /* perform KW operation: get counter as byte string */
0280             crypto_kw_cpu_to_be64(t, tbe);
0281             /* perform KW operation: modify IV with counter */
0282             crypto_xor(block->A, tbe, SEMIBSIZE);
0283             t++;
0284 
0285             /* Copy block->R into place */
0286             scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
0287                            true);
0288 
0289             tmp_nbytes -= SEMIBSIZE;
0290         }
0291 
0292         /* we now start to operate on the dst SGL only */
0293         lsrc = dst;
0294         ldst = dst;
0295     }
0296 
0297     /* establish the IV for the caller to pick up */
0298     memcpy(desc->info, block->A, SEMIBSIZE);
0299 
0300     memzero_explicit(block, sizeof(struct crypto_kw_block));
0301 
0302     return 0;
0303 }
0304 
0305 static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key,
0306                 unsigned int keylen)
0307 {
0308     struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent);
0309     struct crypto_cipher *child = ctx->child;
0310     int err;
0311 
0312     crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
0313     crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
0314                        CRYPTO_TFM_REQ_MASK);
0315     err = crypto_cipher_setkey(child, key, keylen);
0316     crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
0317                      CRYPTO_TFM_RES_MASK);
0318     return err;
0319 }
0320 
0321 static int crypto_kw_init_tfm(struct crypto_tfm *tfm)
0322 {
0323     struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
0324     struct crypto_spawn *spawn = crypto_instance_ctx(inst);
0325     struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
0326     struct crypto_cipher *cipher;
0327 
0328     cipher = crypto_spawn_cipher(spawn);
0329     if (IS_ERR(cipher))
0330         return PTR_ERR(cipher);
0331 
0332     ctx->child = cipher;
0333     return 0;
0334 }
0335 
0336 static void crypto_kw_exit_tfm(struct crypto_tfm *tfm)
0337 {
0338     struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
0339 
0340     crypto_free_cipher(ctx->child);
0341 }
0342 
/*
 * Template instantiation: build a "kw(<cipher>)" blkcipher instance around
 * the single-block cipher named by the template parameter.
 *
 * Returns the new instance, or an ERR_PTR on attribute-type mismatch,
 * unknown cipher, wrong block size, or allocation failure.
 */
static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst = NULL;
	struct crypto_alg *alg = NULL;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	/* look up the underlying single-block cipher (e.g. "aes") */
	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = ERR_PTR(-EINVAL);
	/* Section 5.1 requirement for KW: cipher block size == 128 bits */
	if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
		goto err;

	inst = crypto_alloc_instance("kw", alg);
	if (IS_ERR(inst))
		goto err;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	/* KW operates on 64-bit semiblocks, not the cipher block size */
	inst->alg.cra_blocksize = SEMIBSIZE;
	inst->alg.cra_alignmask = 0;
	inst->alg.cra_type = &crypto_blkcipher_type;
	/* the IV transports the first semiblock (integrity value A) */
	inst->alg.cra_blkcipher.ivsize = SEMIBSIZE;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx);

	inst->alg.cra_init = crypto_kw_init_tfm;
	inst->alg.cra_exit = crypto_kw_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_kw_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt;

	/*
	 * Success falls through to err as well: this only drops the
	 * reference taken by crypto_get_attr_alg() above — NOTE(review):
	 * presumably the instance's spawn holds its own reference on alg;
	 * verify against crypto_alloc_instance() semantics.
	 */
err:
	crypto_mod_put(alg);
	return inst;
}
0389 
/* Tear down an instance: drop the cipher spawn, then free the memory. */
static void crypto_kw_free(struct crypto_instance *inst)
{
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);

	crypto_drop_spawn(spawn);
	kfree(inst);
}
0395 
/* Template descriptor registered with the crypto API as "kw". */
static struct crypto_template crypto_kw_tmpl = {
	.name = "kw",
	.alloc = crypto_kw_alloc,
	.free = crypto_kw_free,
	.module = THIS_MODULE,
};
0402 
/* Register the "kw" template with the crypto API on module load. */
static int __init crypto_kw_init(void)
{
	return crypto_register_template(&crypto_kw_tmpl);
}
0407 
/* Unregister the "kw" template on module unload. */
static void __exit crypto_kw_exit(void)
{
	crypto_unregister_template(&crypto_kw_tmpl);
}
0412 
/* Module entry/exit hooks and metadata. */
module_init(crypto_kw_init);
module_exit(crypto_kw_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");