/* LRW: as defined by Cyril Guyot in
 *  http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
    struct crypto_skcipher *child;
    struct lrw_table_ctx table;
};

struct rctx {
    be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

    be128 t;

    be128 *ext;

    struct scatterlist srcbuf[2];
    struct scatterlist dstbuf[2];
    struct scatterlist *src;
    struct scatterlist *dst;

    unsigned int left;

    struct skcipher_request subreq;
};
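
/*
 * Bookkeeping for one in-flight request: buf (or the larger kmalloc'd ext)
 * caches one tweak value T per 16-byte block of the chunk being processed.
 * pre_crypt() saves each T and XORs it into the plaintext (PP = P xor T),
 * the child cipher then makes a plain ECB pass over the chunk, and
 * post_crypt() XORs the saved values back in, giving the LRW result
 * C = T xor E(Key1, P xor T).
 */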

static inline void setbit128_bbe(void *b, int bit)
{
    __set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
             BITS_PER_LONG
#else
             BITS_PER_BYTE
#endif
            ), b);
}
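
/*
 * setbit128_bbe(b, i) sets the bit of numeric weight 2^i when the 16 bytes
 * at b are read as one big-endian integer.  __set_bit() numbers bits within
 * native-endian unsigned longs, so the index is remapped by the XOR above;
 * e.g. on 64-bit little-endian the constant is 0x80 - 8 = 0x78, which sends
 * bit 0 to __set_bit() index 120 (the low bit of byte 15) and bit 127 to
 * index 7 (the high bit of byte 0).
 */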

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
    be128 tmp = { 0 };
    int i;

    if (ctx->table)
        gf128mul_free_64k(ctx->table);

    /* initialize multiplication table for Key2 */
    ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
    if (!ctx->table)
        return -ENOMEM;

    /* initialize optimization table */
    for (i = 0; i < 128; i++) {
        setbit128_bbe(&tmp, i);
        ctx->mulinc[i] = tmp;
        gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
    }

    return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);
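
/*
 * mulinc[i] caches (I ^ (I + 1)) * Key2 for any counter I whose low-order
 * run of one bits is exactly i bits long: incrementing such an I flips its
 * low i + 1 bits, and tmp above (which accumulates bits 0..i) is precisely
 * that difference.  With this table, stepping from T = I * Key2 to
 * (I + 1) * Key2 costs a single 128-bit XOR instead of a full GF(2^128)
 * multiplication, as suggested by the specification.
 */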

void lrw_free_table(struct lrw_table_ctx *ctx)
{
    if (ctx->table)
        gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_skcipher *parent, const u8 *key,
          unsigned int keylen)
{
    struct priv *ctx = crypto_skcipher_ctx(parent);
    struct crypto_skcipher *child = ctx->child;
    int err, bsize = LRW_BLOCK_SIZE;
    const u8 *tweak = key + keylen - bsize;

    crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
    crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                     CRYPTO_TFM_REQ_MASK);
    err = crypto_skcipher_setkey(child, key, keylen - bsize);
    crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                      CRYPTO_TFM_RES_MASK);
    if (err)
        return err;

    return lrw_init_table(&ctx->table, tweak);
}
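
/*
 * The supplied key is the cipher key (Key1) followed by the 16-byte tweak
 * key (Key2): the first keylen - 16 bytes go to the child cipher and the
 * trailing block seeds the gf128mul tables.  E.g. lrw(aes) with a 48-byte
 * key runs AES-256 under Key1 and uses the last 16 bytes as Key2.
 */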

static inline void inc(be128 *iv)
{
    be64_add_cpu(&iv->b, 1);
    if (!iv->b)
        be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
    int x;
    __be32 *p = (__be32 *) block;

    for (p += 3, x = 0; x < 128; p--, x += 32) {
        u32 val = be32_to_cpup(p);

        if (!~val)
            continue;

        return x + ffz(val);
    }

    /*
     * If we get here the counter is all ones and is about to wrap to all
     * zeros.  Returning x (== 128) would index one past the end of
     * mulinc[]; return 127 instead, the entry for Key2 * { 1, ..., 1 }.
     */
    return 127;
}

static int post_crypt(struct skcipher_request *req)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    be128 *buf = rctx->ext ?: rctx->buf;
    struct skcipher_request *subreq;
    const int bs = LRW_BLOCK_SIZE;
    struct skcipher_walk w;
    struct scatterlist *sg;
    unsigned offset;
    int err;

    subreq = &rctx->subreq;
    err = skcipher_walk_virt(&w, subreq, false);

    while (w.nbytes) {
        unsigned int avail = w.nbytes;
        be128 *wdst;

        wdst = w.dst.virt.addr;

        do {
            be128_xor(wdst, buf++, wdst);
            wdst++;
        } while ((avail -= bs) >= bs);

        err = skcipher_walk_done(&w, avail);
    }

    rctx->left -= subreq->cryptlen;

    if (err || !rctx->left)
        goto out;

    rctx->dst = rctx->dstbuf;

    scatterwalk_done(&w.out, 0, 1);
    sg = w.out.sg;
    offset = w.out.offset;

    if (rctx->dst != sg) {
        rctx->dst[0] = *sg;
        sg_unmark_end(rctx->dst);
        scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
    }
    rctx->dst[0].length -= offset - sg->offset;
    rctx->dst[0].offset = offset;

out:
    return err;
}

static int pre_crypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct rctx *rctx = skcipher_request_ctx(req);
    struct priv *ctx = crypto_skcipher_ctx(tfm);
    be128 *buf = rctx->ext ?: rctx->buf;
    struct skcipher_request *subreq;
    const int bs = LRW_BLOCK_SIZE;
    struct skcipher_walk w;
    struct scatterlist *sg;
    unsigned cryptlen;
    unsigned offset;
    be128 *iv;
    bool more;
    int err;

    subreq = &rctx->subreq;
    skcipher_request_set_tfm(subreq, tfm);

    cryptlen = subreq->cryptlen;
    more = rctx->left > cryptlen;
    if (!more)
        cryptlen = rctx->left;

    skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
                   cryptlen, req->iv);

    err = skcipher_walk_virt(&w, subreq, false);
    iv = w.iv;

    while (w.nbytes) {
        unsigned int avail = w.nbytes;
        be128 *wsrc;
        be128 *wdst;

        wsrc = w.src.virt.addr;
        wdst = w.dst.virt.addr;

        do {
            *buf++ = rctx->t;
            be128_xor(wdst++, &rctx->t, wsrc++);

            /* T <- I*Key2, using the optimization
             * discussed in the specification */
            be128_xor(&rctx->t, &rctx->t,
                  &ctx->table.mulinc[get_index128(iv)]);
            inc(iv);
        } while ((avail -= bs) >= bs);

        err = skcipher_walk_done(&w, avail);
    }

    skcipher_request_set_tfm(subreq, ctx->child);
    skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
                   cryptlen, NULL);

    if (err || !more)
        goto out;

    rctx->src = rctx->srcbuf;

    scatterwalk_done(&w.in, 0, 1);
    sg = w.in.sg;
    offset = w.in.offset;

    if (rctx->src != sg) {
        rctx->src[0] = *sg;
        sg_unmark_end(rctx->src);
        scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
    }
    rctx->src[0].length -= offset - sg->offset;
    rctx->src[0].offset = offset;

out:
    return err;
}
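
/*
 * pre_crypt() computes PP = P xor T for each block and stashes the T
 * values; post_crypt() applies C = CC xor T after the child cipher's ECB
 * pass.  Both also re-point rctx->src/rctx->dst past the bytes already
 * consumed so that the next chunk can resume mid-scatterlist.
 */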

static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
    struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;
    gfp_t gfp;

    subreq = &rctx->subreq;
    skcipher_request_set_callback(subreq, req->base.flags, done, req);

    gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                               GFP_ATOMIC;
    rctx->ext = NULL;

    subreq->cryptlen = LRW_BUFFER_SIZE;
    if (req->cryptlen > LRW_BUFFER_SIZE) {
        unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

        rctx->ext = kmalloc(n, gfp);
        /* only enlarge the chunk size if the allocation succeeded;
         * otherwise fall back to the embedded 128-byte buffer */
        if (rctx->ext)
            subreq->cryptlen = n;
    }

    rctx->src = req->src;
    rctx->dst = req->dst;
    rctx->left = req->cryptlen;

    /* calculate first value of T */
    memcpy(&rctx->t, req->iv, sizeof(rctx->t));

    /* T <- I*Key2 */
    gf128mul_64k_bbe(&rctx->t, ctx->table.table);

    return 0;
}
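
/*
 * Requests are processed in chunks: up to PAGE_SIZE per round when the
 * scratch allocation succeeds, otherwise the 128-byte buffer embedded in
 * the request context.  The loops in do_encrypt()/do_decrypt() repeat
 * pre_crypt -> child crypt -> post_crypt until rctx->left reaches zero.
 */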

static void exit_crypt(struct skcipher_request *req)
{
    struct rctx *rctx = skcipher_request_ctx(req);

    rctx->left = 0;

    kfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;

    subreq = &rctx->subreq;

    while (!err && rctx->left) {
        err = pre_crypt(req) ?:
              crypto_skcipher_encrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS ||
            (err == -EBUSY &&
             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
            return err;
    }

    exit_crypt(req);
    return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;
    struct skcipher_request *subreq;
    struct rctx *rctx;

    rctx = skcipher_request_ctx(req);
    subreq = &rctx->subreq;
    subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

    err = do_encrypt(req, err ?: post_crypt(req));
    if (rctx->left)
        return;

    skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
    return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;

    subreq = &rctx->subreq;

    while (!err && rctx->left) {
        err = pre_crypt(req) ?:
              crypto_skcipher_decrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS ||
            (err == -EBUSY &&
             req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
            return err;
    }

    exit_crypt(req);
    return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;
    struct skcipher_request *subreq;
    struct rctx *rctx;

    rctx = skcipher_request_ctx(req);
    subreq = &rctx->subreq;
    subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

    err = do_decrypt(req, err ?: post_crypt(req));
    if (rctx->left)
        return;

    skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
    return do_decrypt(req, init_crypt(req, decrypt_done));
}

int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
          struct scatterlist *ssrc, unsigned int nbytes,
          struct lrw_crypt_req *req)
{
    const unsigned int bsize = LRW_BLOCK_SIZE;
    const unsigned int max_blks = req->tbuflen / bsize;
    struct lrw_table_ctx *ctx = req->table_ctx;
    struct blkcipher_walk walk;
    unsigned int nblocks;
    be128 *iv, *src, *dst, *t;
    be128 *t_buf = req->tbuf;
    int err, i;

    BUG_ON(max_blks < 1);

    blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

    err = blkcipher_walk_virt(desc, &walk);
    nbytes = walk.nbytes;
    if (!nbytes)
        return err;

    nblocks = min(walk.nbytes / bsize, max_blks);
    src = (be128 *)walk.src.virt.addr;
    dst = (be128 *)walk.dst.virt.addr;

    /* calculate first value of T */
    iv = (be128 *)walk.iv;
    t_buf[0] = *iv;

    /* T <- I*Key2 */
    gf128mul_64k_bbe(&t_buf[0], ctx->table);

    i = 0;
    goto first;

    for (;;) {
        do {
            for (i = 0; i < nblocks; i++) {
                /* T <- I*Key2, using the optimization
                 * discussed in the specification */
                be128_xor(&t_buf[i], t,
                        &ctx->mulinc[get_index128(iv)]);
                inc(iv);
first:
                t = &t_buf[i];

                /* PP <- T xor P */
                be128_xor(dst + i, t, src + i);
            }

            /* CC <- E(Key1,PP) */
            req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                      nblocks * bsize);

            /* C <- T xor CC */
            for (i = 0; i < nblocks; i++)
                be128_xor(dst + i, dst + i, &t_buf[i]);

            src += nblocks;
            dst += nblocks;
            nbytes -= nblocks * bsize;
            nblocks = min(nbytes / bsize, max_blks);
        } while (nblocks > 0);

        err = blkcipher_walk_done(desc, &walk, nbytes);
        nbytes = walk.nbytes;
        if (!nbytes)
            break;

        nblocks = min(nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;
    }

    return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);
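
/*
 * lrw_crypt() and the lrw_table_ctx helpers above are exported for the
 * older blkcipher-based parallel implementations (e.g. the x86 glue code
 * for serpent and twofish); the skcipher template below provides the
 * generic "lrw" instantiation.
 */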

static int init_tfm(struct crypto_skcipher *tfm)
{
    struct skcipher_instance *inst = skcipher_alg_instance(tfm);
    struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
    struct priv *ctx = crypto_skcipher_ctx(tfm);
    struct crypto_skcipher *cipher;

    cipher = crypto_spawn_skcipher(spawn);
    if (IS_ERR(cipher))
        return PTR_ERR(cipher);

    ctx->child = cipher;

    crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
                     sizeof(struct rctx));

    return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
    struct priv *ctx = crypto_skcipher_ctx(tfm);

    lrw_free_table(&ctx->table);
    crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
    crypto_drop_skcipher(skcipher_instance_ctx(inst));
    kfree(inst);
}
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
    struct crypto_skcipher_spawn *spawn;
    struct skcipher_instance *inst;
    struct crypto_attr_type *algt;
    struct skcipher_alg *alg;
    const char *cipher_name;
    char ecb_name[CRYPTO_MAX_ALG_NAME];
    int err;

    algt = crypto_get_attr_type(tb);
    if (IS_ERR(algt))
        return PTR_ERR(algt);

    if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
        return -EINVAL;

    cipher_name = crypto_attr_alg_name(tb[1]);
    if (IS_ERR(cipher_name))
        return PTR_ERR(cipher_name);

    inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
    if (!inst)
        return -ENOMEM;

    spawn = skcipher_instance_ctx(inst);

    crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
    err = crypto_grab_skcipher(spawn, cipher_name, 0,
                   crypto_requires_sync(algt->type,
                            algt->mask));
    if (err == -ENOENT) {
        err = -ENAMETOOLONG;
        if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                 cipher_name) >= CRYPTO_MAX_ALG_NAME)
            goto err_free_inst;

        err = crypto_grab_skcipher(spawn, ecb_name, 0,
                       crypto_requires_sync(algt->type,
                                algt->mask));
    }

    if (err)
        goto err_free_inst;

    alg = crypto_skcipher_spawn_alg(spawn);

    err = -EINVAL;
    if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
        goto err_drop_spawn;

    if (crypto_skcipher_alg_ivsize(alg))
        goto err_drop_spawn;

    err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
                  &alg->base);
    if (err)
        goto err_drop_spawn;

    err = -EINVAL;
    cipher_name = alg->base.cra_name;

    /* Alas we screwed up the naming so we have to mangle the
     * cipher name.
     */
    if (!strncmp(cipher_name, "ecb(", 4)) {
        unsigned len;

        len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
        if (len < 2 || len >= sizeof(ecb_name))
            goto err_drop_spawn;

        if (ecb_name[len - 1] != ')')
            goto err_drop_spawn;

        ecb_name[len - 1] = 0;

        if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                 "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
            /* take the error path so inst and the spawn are freed */
            err = -ENAMETOOLONG;
            goto err_drop_spawn;
        }
    }

    inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
    inst->alg.base.cra_priority = alg->base.cra_priority;
    inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
    inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
                       (__alignof__(u64) - 1);

    inst->alg.ivsize = LRW_BLOCK_SIZE;
    inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
                LRW_BLOCK_SIZE;
    inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
                LRW_BLOCK_SIZE;

    inst->alg.base.cra_ctxsize = sizeof(struct priv);

    inst->alg.init = init_tfm;
    inst->alg.exit = exit_tfm;

    inst->alg.setkey = setkey;
    inst->alg.encrypt = encrypt;
    inst->alg.decrypt = decrypt;

    inst->free = free;

    err = skcipher_register_instance(tmpl, inst);
    if (err)
        goto err_drop_spawn;

out:
    return err;

err_drop_spawn:
    crypto_drop_skcipher(spawn);
err_free_inst:
    kfree(inst);
    goto out;
}
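
/*
 * A minimal usage sketch (error handling abbreviated, and it assumes an
 * AES implementation is available): the template wraps any cipher with a
 * 16-byte block, and "lrw(aes)" resolves to this instance via either
 * "aes" or "ecb(aes)".
 *
 *    struct crypto_skcipher *tfm;
 *
 *    tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *    if (IS_ERR(tfm))
 *        return PTR_ERR(tfm);
 *    err = crypto_skcipher_setkey(tfm, key, 48);  // 32B Key1 + 16B Key2
 *    ...
 *    crypto_free_skcipher(tfm);
 */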

static struct crypto_template crypto_tmpl = {
    .name = "lrw",
    .create = create,
    .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
    return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
    crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");