/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"
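
/*
 * Illustrative usage sketch, not part of this file: one way a caller might
 * drive an ablkcipher and collect the completion callback that the header
 * comment describes.  my_complete(), my_encrypt_sync() and the
 * completion-on-stack waiting style are assumptions for the example only.
 */
#if 0
static void my_complete(struct crypto_async_request *areq, int err)
{
    if (err == -EINPROGRESS)
        return;            /* backlog notification; the final call follows */
    complete(areq->data);  /* wake the submitter; a real caller would also
                              record err somewhere it can read back */
}

static int my_encrypt_sync(struct crypto_ablkcipher *tfm,
                           struct scatterlist *src, struct scatterlist *dst,
                           unsigned int nbytes, u8 *iv)
{
    DECLARE_COMPLETION_ONSTACK(done);
    struct ablkcipher_request *req;
    int err;

    req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req)
        return -ENOMEM;

    ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                    my_complete, &done);
    ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

    err = crypto_ablkcipher_encrypt(req);
    if (err == -EINPROGRESS || err == -EBUSY) {
        wait_for_completion(&done);
        err = 0;    /* see my_complete() for the real final status */
    }

    ablkcipher_request_free(req);
    return err;
}
#endif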

struct ablkcipher_buffer {
    struct list_head    entry;
    struct scatter_walk dst;
    unsigned int        len;
    void            *data;
};

enum {
    ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
    scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
    struct ablkcipher_buffer *p, *tmp;

    list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
        ablkcipher_buffer_write(p);
        list_del(&p->entry);
        kfree(p);
    }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
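
/*
 * A driver that finishes requests asynchronously is expected to flush
 * these pending bounce-buffer writes from its completion path, normally
 * through the ablkcipher_walk_complete() wrapper, before invoking the
 * request's callback.
 */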

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                      struct ablkcipher_buffer *p)
{
    p->dst = walk->out;
    list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
    u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

    return max(start, end_page);
}
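
/*
 * Worked example, assuming 4 KiB pages: for start == 0xff0 and len == 32
 * the last byte lands at 0x100f, so end_page == 0x1000 and the spot is
 * pushed to the start of the next page; for start == 0xf00 and len == 32
 * the last byte stays within the page, end_page == 0x0 <= start, and
 * start itself is returned.
 */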

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                        unsigned int bsize)
{
    unsigned int n = bsize;

    for (;;) {
        unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

        if (len_this_page > n)
            len_this_page = n;
        scatterwalk_advance(&walk->out, n);
        if (n == len_this_page)
            break;
        n -= len_this_page;
        scatterwalk_start(&walk->out, sg_next(walk->out.sg));
    }

    return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                        unsigned int n)
{
    scatterwalk_advance(&walk->in, n);
    scatterwalk_advance(&walk->out, n);

    return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                struct ablkcipher_walk *walk);

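/*
 * Note on the err convention below: on entry a non-negative err is the
 * number of bytes the caller left unprocessed in this step (it must be 0
 * for a bounced slow-path step), not an error code; a negative err aborts
 * the walk and is returned as-is.
 */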
int ablkcipher_walk_done(struct ablkcipher_request *req,
             struct ablkcipher_walk *walk, int err)
{
    struct crypto_tfm *tfm = req->base.tfm;
    unsigned int nbytes = 0;

    if (likely(err >= 0)) {
        unsigned int n = walk->nbytes - err;

        if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
            n = ablkcipher_done_fast(walk, n);
        else if (WARN_ON(err)) {
            err = -EINVAL;
            goto err;
        } else
            n = ablkcipher_done_slow(walk, n);

        nbytes = walk->total - n;
        err = 0;
    }

    scatterwalk_done(&walk->in, 0, nbytes);
    scatterwalk_done(&walk->out, 1, nbytes);

err:
    walk->total = nbytes;
    walk->nbytes = nbytes;

    if (nbytes) {
        crypto_yield(req->base.flags);
        return ablkcipher_walk_next(req, walk);
    }

    if (walk->iv != req->info)
        memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
    kfree(walk->iv_buffer);

    return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                       struct ablkcipher_walk *walk,
                       unsigned int bsize,
                       unsigned int alignmask,
                       void **src_p, void **dst_p)
{
    unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
    struct ablkcipher_buffer *p;
    void *src, *dst, *base;
    unsigned int n;

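    /*
     * Size the allocation to hold the list head plus an alignmask-aligned
     * bounce block, with enough slack for ablkcipher_get_spot() to move
     * the block past a page boundary; the formula mirrors the one in
     * blkcipher.c's slow path.
     */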
    n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
    n += (aligned_bsize * 3 - (alignmask + 1) +
          (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

    p = kmalloc(n, GFP_ATOMIC);
    if (!p)
        return ablkcipher_walk_done(req, walk, -ENOMEM);

    base = p + 1;

    dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
    src = dst = ablkcipher_get_spot(dst, bsize);

    p->len = bsize;
    p->data = dst;

    scatterwalk_copychunks(src, &walk->in, bsize, 0);

    ablkcipher_queue_write(walk, p);

    walk->nbytes = bsize;
    walk->flags |= ABLKCIPHER_WALK_SLOW;

    *src_p = src;
    *dst_p = dst;

    return 0;
}

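/*
 * Bounce an IV that does not satisfy the algorithm's alignment mask into
 * an aligned heap buffer, positioned via ablkcipher_get_spot() so that it
 * does not straddle a page.  The sizing follows blkcipher_copy_iv().
 */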
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                     struct crypto_tfm *tfm,
                     unsigned int alignmask)
{
    unsigned bs = walk->blocksize;
    unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
    unsigned aligned_bs = ALIGN(bs, alignmask + 1);
    unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                (alignmask + 1);
    u8 *iv;

    size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
    walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
    if (!walk->iv_buffer)
        return -ENOMEM;

    iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
    iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
    iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
    iv = ablkcipher_get_spot(iv, ivsize);

    walk->iv = memcpy(iv, walk->iv, ivsize);
    return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                       struct ablkcipher_walk *walk)
{
    walk->src.page = scatterwalk_page(&walk->in);
    walk->src.offset = offset_in_page(walk->in.offset);
    walk->dst.page = scatterwalk_page(&walk->out);
    walk->dst.offset = offset_in_page(walk->out.offset);

    return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                struct ablkcipher_walk *walk)
{
    struct crypto_tfm *tfm = req->base.tfm;
    unsigned int alignmask, bsize, n;
    void *src, *dst;
    int err;

    alignmask = crypto_tfm_alg_alignmask(tfm);
    n = walk->total;
    if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
        req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
        return ablkcipher_walk_done(req, walk, -EINVAL);
    }

    walk->flags &= ~ABLKCIPHER_WALK_SLOW;
    src = dst = NULL;

    bsize = min(walk->blocksize, n);
    n = scatterwalk_clamp(&walk->in, n);
    n = scatterwalk_clamp(&walk->out, n);

    if (n < bsize ||
        !scatterwalk_aligned(&walk->in, alignmask) ||
        !scatterwalk_aligned(&walk->out, alignmask)) {
        err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                       &src, &dst);
        goto set_phys_lowmem;
    }

    walk->nbytes = n;

    return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
    if (err >= 0) {
        walk->src.page = virt_to_page(src);
        walk->dst.page = virt_to_page(dst);
        walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
        walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
    }

    return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                 struct ablkcipher_walk *walk)
{
    struct crypto_tfm *tfm = req->base.tfm;
    unsigned int alignmask;

    alignmask = crypto_tfm_alg_alignmask(tfm);
    if (WARN_ON_ONCE(in_irq()))
        return -EDEADLK;

    walk->iv = req->info;
    walk->nbytes = walk->total;
    if (unlikely(!walk->total))
        return 0;

    walk->iv_buffer = NULL;
    if (unlikely(((unsigned long)walk->iv & alignmask))) {
        int err = ablkcipher_copy_iv(walk, tfm, alignmask);

        if (err)
            return err;
    }

    scatterwalk_start(&walk->in, walk->in.sg);
    scatterwalk_start(&walk->out, walk->out.sg);

    return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
             struct ablkcipher_walk *walk)
{
    walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
    return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
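
/*
 * Illustrative driver-side sketch, not part of this file: stepping through
 * a request with the physical-address walker.  my_hw_process() is a
 * hypothetical hardware hook returning how many bytes of the step it did
 * not consume; a real asynchronous driver would also flush bounce buffers
 * with ablkcipher_walk_complete() from its completion path.
 */
#if 0
static int my_driver_encrypt(struct ablkcipher_request *req)
{
    struct ablkcipher_walk walk;
    int err;

    ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
    err = ablkcipher_walk_phys(req, &walk);

    while (walk.nbytes) {
        /* hand walk.src.page/offset and walk.dst.page/offset to the hw */
        unsigned int left = my_hw_process(&walk, walk.nbytes);

        /* report the bytes of this step that were NOT processed */
        err = ablkcipher_walk_done(req, &walk, left);
        if (err < 0)
            break;
    }

    return err;
}
#endif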
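/*
 * Keys handed in at an address that violates the alignment mask are
 * bounced through a properly aligned heap buffer; the copy is zeroed
 * before the buffer is freed so no key material is left behind.
 */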
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                unsigned int keylen)
{
    struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
    unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
    int ret;
    u8 *buffer, *alignbuffer;
    unsigned long absize;

    absize = keylen + alignmask;
    buffer = kmalloc(absize, GFP_ATOMIC);
    if (!buffer)
        return -ENOMEM;

    alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
    memcpy(alignbuffer, key, keylen);
    ret = cipher->setkey(tfm, alignbuffer, keylen);
    memset(alignbuffer, 0, keylen);
    kfree(buffer);
    return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
          unsigned int keylen)
{
    struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
    unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

    if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
        crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    if ((unsigned long)key & alignmask)
        return setkey_unaligned(tfm, key, keylen);

    return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                          u32 mask)
{
    return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                      u32 mask)
{
    struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
    struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

    if (alg->ivsize > PAGE_SIZE / 8)
        return -EINVAL;

    crt->setkey = setkey;
    crt->encrypt = alg->encrypt;
    crt->decrypt = alg->decrypt;
    crt->base = __crypto_ablkcipher_cast(tfm);
    crt->ivsize = alg->ivsize;

    return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    struct crypto_report_blkcipher rblkcipher;

    strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
    strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
        sizeof(rblkcipher.geniv));

    rblkcipher.blocksize = alg->cra_blocksize;
    rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
    rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
    rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

    if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
            sizeof(struct crypto_report_blkcipher), &rblkcipher))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
    struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

    seq_printf(m, "type         : ablkcipher\n");
    seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                         "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
    seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
    seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
    seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
    .ctxsize = crypto_ablkcipher_ctxsize,
    .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
    .show = crypto_ablkcipher_show,
#endif
    .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
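
/*
 * Illustrative registration sketch, not part of this file: how a driver
 * might expose an algorithm through crypto_ablkcipher_type.  Every my_*
 * name and the priority/alignmask values are assumptions for the example.
 */
#if 0
static struct crypto_alg my_alg = {
    .cra_name          = "cbc(aes)",
    .cra_driver_name   = "cbc-aes-mydriver",
    .cra_priority      = 300,
    .cra_flags         = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
    .cra_blocksize     = AES_BLOCK_SIZE,
    .cra_ctxsize       = sizeof(struct my_ctx),
    .cra_alignmask     = 15,
    .cra_type          = &crypto_ablkcipher_type,
    .cra_module        = THIS_MODULE,
    .cra_u = {
        .ablkcipher = {
            .min_keysize = AES_MIN_KEY_SIZE,
            .max_keysize = AES_MAX_KEY_SIZE,
            .ivsize      = AES_BLOCK_SIZE,
            .setkey      = my_setkey,
            .encrypt     = my_encrypt,
            .decrypt     = my_decrypt,
        },
    },
};

/* registered from the driver's init with crypto_register_alg(&my_alg) */
#endif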

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                      u32 mask)
{
    struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
    struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

    if (alg->ivsize > PAGE_SIZE / 8)
        return -EINVAL;

    crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
              alg->setkey : setkey;
    crt->encrypt = alg->encrypt;
    crt->decrypt = alg->decrypt;
    crt->base = __crypto_ablkcipher_cast(tfm);
    crt->ivsize = alg->ivsize;

    return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    struct crypto_report_blkcipher rblkcipher;

    strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
    strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
        sizeof(rblkcipher.geniv));

    rblkcipher.blocksize = alg->cra_blocksize;
    rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
    rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
    rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

    if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
            sizeof(struct crypto_report_blkcipher), &rblkcipher))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
    struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

    seq_printf(m, "type         : givcipher\n");
    seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                         "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
    seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
    seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
    seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
    .ctxsize = crypto_ablkcipher_ctxsize,
    .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
    .show = crypto_givcipher_show,
#endif
    .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);