/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
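
/*
 * A minimal usage sketch of the walker implemented below: this is roughly
 * how a simple block cipher mode drives blkcipher_walk_virt() and
 * blkcipher_walk_done().  The single-block helper crypt_one_block() is a
 * hypothetical stand-in for the underlying cipher call; real modes such as
 * ECB or CBC follow the same loop structure.
 *
 *	static int example_crypt(struct blkcipher_desc *desc,
 *				 struct scatterlist *dst,
 *				 struct scatterlist *src,
 *				 unsigned int nbytes)
 *	{
 *		unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes) >= bsize) {
 *			u8 *wsrc = walk.src.virt.addr;
 *			u8 *wdst = walk.dst.virt.addr;
 *
 *			do {
 *				crypt_one_block(desc->tfm, wdst, wsrc);
 *				wsrc += bsize;
 *				wdst += bsize;
 *			} while ((nbytes -= bsize) >= bsize);
 *
 *			err = blkcipher_walk_done(desc, &walk, nbytes);
 *		}
 *
 *		return err;
 *	}
 */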

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
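
/*
 * Worked example, assuming PAGE_SIZE == 4096 and a page-aligned base: with
 * start == base + 0x0ff8 and len == 16 the last byte would land at
 * base + 0x1007, so end_page is base + 0x1000 and the chunk is moved to the
 * start of the next page.  With start == base + 0x0ff0 and len == 16 the
 * chunk already fits within one page, so start itself is returned.
 */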

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
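
/*
 * Slow path: fewer than bsize contiguous bytes are available in the current
 * scatterlist entries, so one block is bounced through walk->buffer.  The
 * allocation below leaves room for an aligned destination spot and an
 * aligned source spot, each of which blkcipher_get_spot() may push to the
 * start of the next page, plus slack for the initial alignment.
 */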
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
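
/*
 * Copy the IV into a buffer that satisfies the algorithm's alignmask.  The
 * allocation also reserves two block-sized scratch spots ahead of the IV
 * copy, laid out so that blkcipher_next_slow() can reuse this buffer as its
 * bounce buffer, and so that neither the spots nor the IV straddle a page.
 */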
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
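
/*
 * For synchronous users the IV lives in the transform's context memory:
 * crypto_blkcipher_ctxsize() reserved ivsize extra bytes, and crt->iv is
 * pointed just past the algorithm's own (alignmask-aligned) context.
 */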
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");