Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Key Wrapping: RFC3394 / NIST SP800-38F
0003  *
0004  * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
0005  *
0006  * Redistribution and use in source and binary forms, with or without
0007  * modification, are permitted provided that the following conditions
0008  * are met:
0009  * 1. Redistributions of source code must retain the above copyright
0010  *    notice, and the entire permission notice in its entirety,
0011  *    including the disclaimer of warranties.
0012  * 2. Redistributions in binary form must reproduce the above copyright
0013  *    notice, this list of conditions and the following disclaimer in the
0014  *    documentation and/or other materials provided with the distribution.
0015  * 3. The name of the author may not be used to endorse or promote
0016  *    products derived from this software without specific prior
0017  *    written permission.
0018  *
0019  * ALTERNATIVELY, this product may be distributed under the terms of
0020  * the GNU General Public License, in which case the provisions of the GPL2
0021  * are required INSTEAD OF the above restrictions.  (This clause is
0022  * necessary due to a potential bad interaction between the GPL and
0023  * the restrictions contained in a BSD-style copyright.)
0024  *
0025  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
0026  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
0027  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
0028  * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
0029  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0030  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
0031  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
0032  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
0033  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
0034  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0035  * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
0036  * DAMAGE.
0037  */
0038 
0039 /*
0040  * Note for using key wrapping:
0041  *
0042  *  * The result of the encryption operation is the ciphertext starting
0043  *    with the 2nd semiblock. The first semiblock is provided as the IV.
0044  *    The IV used to start the encryption operation is the default IV.
0045  *
0046  *  * The input for the decryption is the first semiblock handed in as an
0047  *    IV. The ciphertext is the data starting with the 2nd semiblock. The
0048  *    return code of the decryption operation will be EBADMSG in case an
0049  *    integrity error occurs.
0050  *
0051  * To obtain the full result of an encryption as expected by SP800-38F, the
0052  * caller must allocate a buffer of plaintext + 8 bytes:
0053  *
0054  *  unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
0055  *  u8 data[datalen];
0056  *  u8 *iv = data;
0057  *  u8 *pt = data + crypto_skcipher_ivsize(tfm);
0058  *      <ensure that pt contains the plaintext of size ptlen>
0059  *  sg_init_one(&sg, pt, ptlen);
0060  *  skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
0061  *
0062  *  ==> After encryption, data now contains full KW result as per SP800-38F.
0063  *
0064  * In case of decryption, ciphertext now already has the expected length
0065  * and must be segmented appropriately:
0066  *
0067  *  unsigned int datalen = CTLEN;
0068  *  u8 data[datalen];
0069  *      <ensure that data contains full ciphertext>
0070  *  u8 *iv = data;
0071  *  u8 *ct = data + crypto_skcipher_ivsize(tfm);
0072  *  unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
0073  *  sg_init_one(&sg, ct, ctlen);
0074  *  skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
0075  *
0076  *  ==> After decryption (which hopefully does not return EBADMSG), the ct
0077  *  pointer now points to the plaintext of size ctlen.
0078  *
0079  * Note 2: KWP is not implemented as this would defy in-place operation.
0080  *     If somebody wants to wrap non-aligned data, he should simply pad
0081  *     the input with zeros to fill it up to the 8 byte boundary.
0082  */
0083 
0084 #include <linux/module.h>
0085 #include <linux/crypto.h>
0086 #include <linux/scatterlist.h>
0087 #include <crypto/scatterwalk.h>
0088 #include <crypto/internal/cipher.h>
0089 #include <crypto/internal/skcipher.h>
0090 
/*
 * One 128-bit working block for the wrap/unwrap rounds. Per SP800-38F the
 * cipher operates on a pair of 64-bit "semiblocks": A carries the running
 * integrity/IV value, R the data semiblock currently being processed. Both
 * are big-endian as mandated by the specification.
 */
struct crypto_kw_block {
#define SEMIBSIZE 8
    __be64 A;	/* integrity check / chaining value */
    __be64 R;	/* current data semiblock */
};
0096 
0097 /*
0098  * Fast forward the SGL to the "end" length minus SEMIBSIZE.
0099  * The start in the SGL defined by the fast-forward is returned with
0100  * the walk variable
0101  */
0102 static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
0103                      struct scatterlist *sg,
0104                      unsigned int end)
0105 {
0106     unsigned int skip = 0;
0107 
0108     /* The caller should only operate on full SEMIBLOCKs. */
0109     BUG_ON(end < SEMIBSIZE);
0110 
0111     skip = end - SEMIBSIZE;
0112     while (sg) {
0113         if (sg->length > skip) {
0114             scatterwalk_start(walk, sg);
0115             scatterwalk_advance(walk, skip);
0116             break;
0117         }
0118 
0119         skip -= sg->length;
0120         sg = sg_next(sg);
0121     }
0122 }
0123 
0124 static int crypto_kw_decrypt(struct skcipher_request *req)
0125 {
0126     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
0127     struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
0128     struct crypto_kw_block block;
0129     struct scatterlist *src, *dst;
0130     u64 t = 6 * ((req->cryptlen) >> 3);
0131     unsigned int i;
0132     int ret = 0;
0133 
0134     /*
0135      * Require at least 2 semiblocks (note, the 3rd semiblock that is
0136      * required by SP800-38F is the IV.
0137      */
0138     if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
0139         return -EINVAL;
0140 
0141     /* Place the IV into block A */
0142     memcpy(&block.A, req->iv, SEMIBSIZE);
0143 
0144     /*
0145      * src scatterlist is read-only. dst scatterlist is r/w. During the
0146      * first loop, src points to req->src and dst to req->dst. For any
0147      * subsequent round, the code operates on req->dst only.
0148      */
0149     src = req->src;
0150     dst = req->dst;
0151 
0152     for (i = 0; i < 6; i++) {
0153         struct scatter_walk src_walk, dst_walk;
0154         unsigned int nbytes = req->cryptlen;
0155 
0156         while (nbytes) {
0157             /* move pointer by nbytes in the SGL */
0158             crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
0159             /* get the source block */
0160             scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
0161                            false);
0162 
0163             /* perform KW operation: modify IV with counter */
0164             block.A ^= cpu_to_be64(t);
0165             t--;
0166             /* perform KW operation: decrypt block */
0167             crypto_cipher_decrypt_one(cipher, (u8 *)&block,
0168                           (u8 *)&block);
0169 
0170             /* move pointer by nbytes in the SGL */
0171             crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
0172             /* Copy block->R into place */
0173             scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
0174                            true);
0175 
0176             nbytes -= SEMIBSIZE;
0177         }
0178 
0179         /* we now start to operate on the dst SGL only */
0180         src = req->dst;
0181         dst = req->dst;
0182     }
0183 
0184     /* Perform authentication check */
0185     if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
0186         ret = -EBADMSG;
0187 
0188     memzero_explicit(&block, sizeof(struct crypto_kw_block));
0189 
0190     return ret;
0191 }
0192 
0193 static int crypto_kw_encrypt(struct skcipher_request *req)
0194 {
0195     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
0196     struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
0197     struct crypto_kw_block block;
0198     struct scatterlist *src, *dst;
0199     u64 t = 1;
0200     unsigned int i;
0201 
0202     /*
0203      * Require at least 2 semiblocks (note, the 3rd semiblock that is
0204      * required by SP800-38F is the IV that occupies the first semiblock.
0205      * This means that the dst memory must be one semiblock larger than src.
0206      * Also ensure that the given data is aligned to semiblock.
0207      */
0208     if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
0209         return -EINVAL;
0210 
0211     /*
0212      * Place the predefined IV into block A -- for encrypt, the caller
0213      * does not need to provide an IV, but he needs to fetch the final IV.
0214      */
0215     block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
0216 
0217     /*
0218      * src scatterlist is read-only. dst scatterlist is r/w. During the
0219      * first loop, src points to req->src and dst to req->dst. For any
0220      * subsequent round, the code operates on req->dst only.
0221      */
0222     src = req->src;
0223     dst = req->dst;
0224 
0225     for (i = 0; i < 6; i++) {
0226         struct scatter_walk src_walk, dst_walk;
0227         unsigned int nbytes = req->cryptlen;
0228 
0229         scatterwalk_start(&src_walk, src);
0230         scatterwalk_start(&dst_walk, dst);
0231 
0232         while (nbytes) {
0233             /* get the source block */
0234             scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
0235                            false);
0236 
0237             /* perform KW operation: encrypt block */
0238             crypto_cipher_encrypt_one(cipher, (u8 *)&block,
0239                           (u8 *)&block);
0240             /* perform KW operation: modify IV with counter */
0241             block.A ^= cpu_to_be64(t);
0242             t++;
0243 
0244             /* Copy block->R into place */
0245             scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
0246                            true);
0247 
0248             nbytes -= SEMIBSIZE;
0249         }
0250 
0251         /* we now start to operate on the dst SGL only */
0252         src = req->dst;
0253         dst = req->dst;
0254     }
0255 
0256     /* establish the IV for the caller to pick up */
0257     memcpy(req->iv, &block.A, SEMIBSIZE);
0258 
0259     memzero_explicit(&block, sizeof(struct crypto_kw_block));
0260 
0261     return 0;
0262 }
0263 
0264 static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
0265 {
0266     struct skcipher_instance *inst;
0267     struct crypto_alg *alg;
0268     int err;
0269 
0270     inst = skcipher_alloc_instance_simple(tmpl, tb);
0271     if (IS_ERR(inst))
0272         return PTR_ERR(inst);
0273 
0274     alg = skcipher_ialg_simple(inst);
0275 
0276     err = -EINVAL;
0277     /* Section 5.1 requirement for KW */
0278     if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
0279         goto out_free_inst;
0280 
0281     inst->alg.base.cra_blocksize = SEMIBSIZE;
0282     inst->alg.base.cra_alignmask = 0;
0283     inst->alg.ivsize = SEMIBSIZE;
0284 
0285     inst->alg.encrypt = crypto_kw_encrypt;
0286     inst->alg.decrypt = crypto_kw_decrypt;
0287 
0288     err = skcipher_register_instance(tmpl, inst);
0289     if (err) {
0290 out_free_inst:
0291         inst->free(inst);
0292     }
0293 
0294     return err;
0295 }
0296 
/* Template descriptor: instantiated as "kw(<cipher>)", e.g. "kw(aes)". */
static struct crypto_template crypto_kw_tmpl = {
    .name = "kw",
    .create = crypto_kw_create,
    .module = THIS_MODULE,
};
0302 
/* Module init: register the "kw" template with the crypto API. */
static int __init crypto_kw_init(void)
{
    return crypto_register_template(&crypto_kw_tmpl);
}
0307 
/* Module exit: unregister the "kw" template. */
static void __exit crypto_kw_exit(void)
{
    crypto_unregister_template(&crypto_kw_tmpl);
}
0312 
/* Register early (subsys level) so dependent algorithms can instantiate. */
subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
/* Allows auto-loading when a "kw(...)" algorithm is requested. */
MODULE_ALIAS_CRYPTO("kw");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);