/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
    SKCIPHER_WALK_PHYS = 1 << 0,
    SKCIPHER_WALK_SLOW = 1 << 1,
    SKCIPHER_WALK_COPY = 1 << 2,
    SKCIPHER_WALK_DIFF = 1 << 3,
    SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
    struct list_head entry;
    struct scatter_walk dst;
    unsigned int len;
    u8 *data;
    u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
    if (PageHighMem(scatterwalk_page(walk)))
        kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
    struct page *page = scatterwalk_page(walk);

    return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
           offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
    walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
    walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
    skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
    skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
    return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
    u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

    return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
    u8 *addr;

    addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
    addr = skcipher_get_spot(addr, bsize);
    scatterwalk_copychunks(addr, &walk->out, bsize,
                   (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
    return 0;
}

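/*
 * Complete one step of the walk.  @err is the number of bytes the caller
 * left unprocessed in this step (or a negative error code).  Bounce
 * buffers used by the copy and slow paths are written back to the
 * destination, both scatterlists are advanced, and the next step is
 * started if any data remains.  Returns 0 or a negative error once the
 * walk has finished.
 */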
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
    unsigned int n = walk->nbytes - err;
    unsigned int nbytes;

    nbytes = walk->total - n;

    if (unlikely(err < 0)) {
        nbytes = 0;
        n = 0;
    } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                       SKCIPHER_WALK_SLOW |
                       SKCIPHER_WALK_COPY |
                       SKCIPHER_WALK_DIFF)))) {
unmap_src:
        skcipher_unmap_src(walk);
    } else if (walk->flags & SKCIPHER_WALK_DIFF) {
        skcipher_unmap_dst(walk);
        goto unmap_src;
    } else if (walk->flags & SKCIPHER_WALK_COPY) {
        skcipher_map_dst(walk);
        memcpy(walk->dst.virt.addr, walk->page, n);
        skcipher_unmap_dst(walk);
    } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
        if (WARN_ON(err)) {
            err = -EINVAL;
            nbytes = 0;
        } else
            n = skcipher_done_slow(walk, n);
    }

    if (err > 0)
        err = 0;

    walk->total = nbytes;
    walk->nbytes = nbytes;

    scatterwalk_advance(&walk->in, n);
    scatterwalk_advance(&walk->out, n);
    scatterwalk_done(&walk->in, 0, nbytes);
    scatterwalk_done(&walk->out, 1, nbytes);

    if (nbytes) {
        crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                 CRYPTO_TFM_REQ_MAY_SLEEP : 0);
        return skcipher_walk_next(walk);
    }

    /* Short-circuit for the common/fast path. */
    if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
        goto out;

    if (walk->flags & SKCIPHER_WALK_PHYS)
        goto out;

    if (walk->iv != walk->oiv)
        memcpy(walk->oiv, walk->iv, walk->ivsize);
    if (walk->buffer != walk->page)
        kfree(walk->buffer);
    if (walk->page)
        free_page((unsigned long)walk->page);

out:
    return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

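/*
 * Flush the writes that skcipher_next_slow() and skcipher_next_copy()
 * queued for a SKCIPHER_WALK_PHYS walk.  Called by asynchronous users
 * once the request has finished; on error the queued buffers are freed
 * without being copied out.
 */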
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
    struct skcipher_walk_buffer *p, *tmp;

    list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
        u8 *data;

        if (err)
            goto done;

        data = p->data;
        if (!data) {
            data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
            data = skcipher_get_spot(data, walk->chunksize);
        }

        scatterwalk_copychunks(data, &p->dst, p->len, 1);

        if (offset_in_page(p->data) + p->len + walk->chunksize >
            PAGE_SIZE)
            free_page((unsigned long)p->data);

done:
        list_del(&p->entry);
        kfree(p);
    }

    if (!err && walk->iv != walk->oiv)
        memcpy(walk->oiv, walk->iv, walk->ivsize);
    if (walk->buffer != walk->page)
        kfree(walk->buffer);
    if (walk->page)
        free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                 struct skcipher_walk_buffer *p)
{
    p->dst = walk->out;
    list_add_tail(&p->entry, &walk->buffers);
}

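/*
 * Slow path: fewer than a full block of contiguous bytes is available in
 * the source or destination scatterlist, so the block is bounced through
 * an aligned temporary buffer.  Both src and dst point at that buffer;
 * the result is copied back out in skcipher_done_slow() (or, for phys
 * walks, in skcipher_walk_complete()).
 */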
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
    bool phys = walk->flags & SKCIPHER_WALK_PHYS;
    unsigned alignmask = walk->alignmask;
    struct skcipher_walk_buffer *p;
    unsigned a;
    unsigned n;
    u8 *buffer;
    void *v;

    if (!phys) {
        if (!walk->buffer)
            walk->buffer = walk->page;
        buffer = walk->buffer;
        if (buffer)
            goto ok;
    }

    /* Start with the minimum alignment of kmalloc. */
    a = crypto_tfm_ctx_alignment() - 1;
    n = bsize;

    if (phys) {
        /* Calculate the minimum alignment of p->buffer. */
        a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
        n += sizeof(*p);
    }

    /* Minimum size to align p->buffer by alignmask. */
    n += alignmask & ~a;

    /* Minimum size to ensure p->buffer does not straddle a page. */
    n += (bsize - 1) & ~(alignmask | a);

    v = kzalloc(n, skcipher_walk_gfp(walk));
    if (!v)
        return skcipher_walk_done(walk, -ENOMEM);

    if (phys) {
        p = v;
        p->len = bsize;
        skcipher_queue_write(walk, p);
        buffer = p->buffer;
    } else {
        walk->buffer = v;
        buffer = v;
    }

ok:
    walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
    walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
    walk->src.virt.addr = walk->dst.virt.addr;

    scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

    walk->nbytes = bsize;
    walk->flags |= SKCIPHER_WALK_SLOW;

    return 0;
}

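/*
 * Copy path: a whole step's worth of data is contiguous but violates the
 * algorithm's alignment mask.  The input is copied into the preallocated
 * walk->page so the cipher can work in place on aligned memory; the
 * result is copied back (or queued for write-out) by skcipher_walk_done().
 */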
static int skcipher_next_copy(struct skcipher_walk *walk)
{
    struct skcipher_walk_buffer *p;
    u8 *tmp = walk->page;

    skcipher_map_src(walk);
    memcpy(tmp, walk->src.virt.addr, walk->nbytes);
    skcipher_unmap_src(walk);

    walk->src.virt.addr = tmp;
    walk->dst.virt.addr = tmp;

    if (!(walk->flags & SKCIPHER_WALK_PHYS))
        return 0;

    p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
    if (!p)
        return -ENOMEM;

    p->data = walk->page;
    p->len = walk->nbytes;
    skcipher_queue_write(walk, p);

    if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
        PAGE_SIZE)
        walk->page = NULL;
    else
        walk->page += walk->nbytes;

    return 0;
}

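/*
 * Fast path: source and destination are mapped directly and the cipher
 * operates on the caller's pages without copying.  If src and dst are
 * distinct, SKCIPHER_WALK_DIFF is set and both mappings are kept;
 * otherwise the data is processed in place.
 */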
static int skcipher_next_fast(struct skcipher_walk *walk)
{
    unsigned long diff;

    walk->src.phys.page = scatterwalk_page(&walk->in);
    walk->src.phys.offset = offset_in_page(walk->in.offset);
    walk->dst.phys.page = scatterwalk_page(&walk->out);
    walk->dst.phys.offset = offset_in_page(walk->out.offset);

    if (walk->flags & SKCIPHER_WALK_PHYS)
        return 0;

    diff = walk->src.phys.offset - walk->dst.phys.offset;
    diff |= walk->src.virt.page - walk->dst.virt.page;

    skcipher_map_src(walk);
    walk->dst.virt.addr = walk->src.virt.addr;

    if (diff) {
        walk->flags |= SKCIPHER_WALK_DIFF;
        skcipher_map_dst(walk);
    }

    return 0;
}

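/*
 * Set up the next step of the walk: clamp the step size to what both
 * scatterlists can provide contiguously, then dispatch to the slow
 * (bounce buffer), copy (realignment) or fast (in place) path.
 */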
static int skcipher_walk_next(struct skcipher_walk *walk)
{
    unsigned int bsize;
    unsigned int n;
    int err;

    walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
             SKCIPHER_WALK_DIFF);

    n = walk->total;
    bsize = min(walk->chunksize, max(n, walk->blocksize));
    n = scatterwalk_clamp(&walk->in, n);
    n = scatterwalk_clamp(&walk->out, n);

    if (unlikely(n < bsize)) {
        if (unlikely(walk->total < walk->blocksize))
            return skcipher_walk_done(walk, -EINVAL);

slow_path:
        err = skcipher_next_slow(walk, bsize);
        goto set_phys_lowmem;
    }

    if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
        if (!walk->page) {
            gfp_t gfp = skcipher_walk_gfp(walk);

            walk->page = (void *)__get_free_page(gfp);
            if (!walk->page)
                goto slow_path;
        }

        walk->nbytes = min_t(unsigned, n,
                     PAGE_SIZE - offset_in_page(walk->page));
        walk->flags |= SKCIPHER_WALK_COPY;
        err = skcipher_next_copy(walk);
        goto set_phys_lowmem;
    }

    walk->nbytes = n;

    return skcipher_next_fast(walk);

set_phys_lowmem:
    if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
        walk->src.phys.page = virt_to_page(walk->src.virt.addr);
        walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
        walk->src.phys.offset &= PAGE_SIZE - 1;
        walk->dst.phys.offset &= PAGE_SIZE - 1;
    }
    return err;
}

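/*
 * If the caller's IV is not aligned to the algorithm's alignmask, copy it
 * into a correctly aligned buffer; for virtual-address walks the same
 * allocation doubles as the slow-path bounce buffer.  walk->iv then points
 * at the aligned copy, which is copied back to the original IV buffer when
 * the walk finishes.
 */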
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
    unsigned a = crypto_tfm_ctx_alignment() - 1;
    unsigned alignmask = walk->alignmask;
    unsigned ivsize = walk->ivsize;
    unsigned bs = walk->chunksize;
    unsigned aligned_bs;
    unsigned size;
    u8 *iv;

    aligned_bs = ALIGN(bs, alignmask + 1);
    /* Minimum size to align buffer by alignmask. */
    size = alignmask & ~a;

    if (walk->flags & SKCIPHER_WALK_PHYS)
        size += ivsize;
    else {
        size += aligned_bs + ivsize;

        /* Minimum size to ensure buffer does not straddle a page. */
        size += (bs - 1) & ~(alignmask | a);
    }

    walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
    if (!walk->buffer)
        return -ENOMEM;

    iv = PTR_ALIGN(walk->buffer, alignmask + 1);
    iv = skcipher_get_spot(iv, bs) + aligned_bs;

    walk->iv = memcpy(iv, walk->iv, walk->ivsize);
    return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
    walk->nbytes = 0;

    if (WARN_ON_ONCE(in_irq()))
        return -EDEADLK;

    if (unlikely(!walk->total))
        return 0;

    walk->buffer = NULL;
    if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
        int err = skcipher_copy_iv(walk);
        if (err)
            return err;
    }

    walk->page = NULL;
    walk->nbytes = walk->total;

    return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                  struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

    scatterwalk_start(&walk->in, req->src);
    scatterwalk_start(&walk->out, req->dst);

    walk->total = req->cryptlen;
    walk->iv = req->iv;
    walk->oiv = req->iv;

    walk->flags &= ~SKCIPHER_WALK_SLEEP;
    walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
               SKCIPHER_WALK_SLEEP : 0;

    walk->blocksize = crypto_skcipher_blocksize(tfm);
    walk->chunksize = crypto_skcipher_chunksize(tfm);
    walk->ivsize = crypto_skcipher_ivsize(tfm);
    walk->alignmask = crypto_skcipher_alignmask(tfm);

    return skcipher_walk_first(walk);
}

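/*
 * Start a walk that yields virtual addresses (walk.src.virt.addr and
 * walk.dst.virt.addr) for synchronous, CPU-based implementations.
 *
 * A minimal usage sketch, assuming a block size of "bsize" and a
 * hypothetical crypt_blocks() helper standing in for the real cipher code:
 *
 *     struct skcipher_walk walk;
 *     unsigned int n;
 *     int err;
 *
 *     err = skcipher_walk_virt(&walk, req, false);
 *     while ((n = walk.nbytes) != 0) {
 *         n -= n % bsize;        (whole blocks only)
 *         crypt_blocks(walk.dst.virt.addr, walk.src.virt.addr, n, walk.iv);
 *         err = skcipher_walk_done(&walk, walk.nbytes - n);
 *     }
 *     return err;
 */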
int skcipher_walk_virt(struct skcipher_walk *walk,
               struct skcipher_request *req, bool atomic)
{
    int err;

    walk->flags &= ~SKCIPHER_WALK_PHYS;

    err = skcipher_walk_skcipher(walk, req);

    walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

    return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
    walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

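/*
 * Start a walk that yields page/offset pairs (walk.src.phys and
 * walk.dst.phys) for asynchronous implementations.  Writes that had to be
 * bounced through temporary buffers are queued on walk->buffers and must
 * be flushed with skcipher_walk_complete() once the operation has finished.
 */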
int skcipher_walk_async(struct skcipher_walk *walk,
            struct skcipher_request *req)
{
    walk->flags |= SKCIPHER_WALK_PHYS;

    INIT_LIST_HEAD(&walk->buffers);

    return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

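/*
 * AEAD variant of the walk setup: both scatterlists are advanced past
 * req->assoclen bytes of associated data first, so the walk only covers
 * the plaintext/ciphertext portion of the request.
 */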
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                     struct aead_request *req, bool atomic)
{
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    int err;

    walk->flags &= ~SKCIPHER_WALK_PHYS;

    scatterwalk_start(&walk->in, req->src);
    scatterwalk_start(&walk->out, req->dst);

    scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
    scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

    walk->iv = req->iv;
    walk->oiv = req->iv;

    if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
        walk->flags |= SKCIPHER_WALK_SLEEP;
    else
        walk->flags &= ~SKCIPHER_WALK_SLEEP;

    walk->blocksize = crypto_aead_blocksize(tfm);
    walk->chunksize = crypto_aead_chunksize(tfm);
    walk->ivsize = crypto_aead_ivsize(tfm);
    walk->alignmask = crypto_aead_alignmask(tfm);

    err = skcipher_walk_first(walk);

    if (atomic)
        walk->flags &= ~SKCIPHER_WALK_SLEEP;

    return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
               bool atomic)
{
    walk->total = req->cryptlen;

    return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                   struct aead_request *req, bool atomic)
{
    walk->total = req->cryptlen;

    return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                   struct aead_request *req, bool atomic)
{
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);

    walk->total = req->cryptlen - crypto_aead_authsize(tfm);

    return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

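/*
 * Compatibility layer: the functions below let a crypto_skcipher handle
 * wrap an algorithm that was registered through the legacy blkcipher or
 * ablkcipher interfaces, so users of the new API work unchanged on top of
 * old implementations.
 */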
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
    if (alg->cra_type == &crypto_blkcipher_type)
        return sizeof(struct crypto_blkcipher *);

    if (alg->cra_type == &crypto_ablkcipher_type ||
        alg->cra_type == &crypto_givcipher_type)
        return sizeof(struct crypto_ablkcipher *);

    return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                     const u8 *key, unsigned int keylen)
{
    struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
    struct crypto_blkcipher *blkcipher = *ctx;
    int err;

    crypto_blkcipher_clear_flags(blkcipher, ~0);
    crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                          CRYPTO_TFM_REQ_MASK);
    err = crypto_blkcipher_setkey(blkcipher, key, keylen);
    crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                       CRYPTO_TFM_RES_MASK);

    return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                    int (*crypt)(struct blkcipher_desc *,
                         struct scatterlist *,
                         struct scatterlist *,
                         unsigned int))
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
    struct blkcipher_desc desc = {
        .tfm = *ctx,
        .info = req->iv,
        .flags = req->base.flags,
    };

    return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
    struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
    struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

    return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
    struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
    struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

    return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
    struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

    crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
    struct crypto_alg *calg = tfm->__crt_alg;
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
    struct crypto_blkcipher *blkcipher;
    struct crypto_tfm *btfm;

    if (!crypto_mod_get(calg))
        return -EAGAIN;

    btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                    CRYPTO_ALG_TYPE_MASK);
    if (IS_ERR(btfm)) {
        crypto_mod_put(calg);
        return PTR_ERR(btfm);
    }

    blkcipher = __crypto_blkcipher_cast(btfm);
    *ctx = blkcipher;
    tfm->exit = crypto_exit_skcipher_ops_blkcipher;

    skcipher->setkey = skcipher_setkey_blkcipher;
    skcipher->encrypt = skcipher_encrypt_blkcipher;
    skcipher->decrypt = skcipher_decrypt_blkcipher;

    skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
    skcipher->keysize = calg->cra_blkcipher.max_keysize;

    return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                      const u8 *key, unsigned int keylen)
{
    struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
    struct crypto_ablkcipher *ablkcipher = *ctx;
    int err;

    crypto_ablkcipher_clear_flags(ablkcipher, ~0);
    crypto_ablkcipher_set_flags(ablkcipher,
                    crypto_skcipher_get_flags(tfm) &
                    CRYPTO_TFM_REQ_MASK);
    err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
    crypto_skcipher_set_flags(tfm,
                  crypto_ablkcipher_get_flags(ablkcipher) &
                  CRYPTO_TFM_RES_MASK);

    return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                     int (*crypt)(struct ablkcipher_request *))
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
    struct ablkcipher_request *subreq = skcipher_request_ctx(req);

    ablkcipher_request_set_tfm(subreq, *ctx);
    ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                    req->base.complete, req->base.data);
    ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                     req->iv);

    return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
    struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
    struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

    return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
    struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
    struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

    return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
    struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

    crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
    struct crypto_alg *calg = tfm->__crt_alg;
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
    struct crypto_ablkcipher *ablkcipher;
    struct crypto_tfm *abtfm;

    if (!crypto_mod_get(calg))
        return -EAGAIN;

    abtfm = __crypto_alloc_tfm(calg, 0, 0);
    if (IS_ERR(abtfm)) {
        crypto_mod_put(calg);
        return PTR_ERR(abtfm);
    }

    ablkcipher = __crypto_ablkcipher_cast(abtfm);
    *ctx = ablkcipher;
    tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

    skcipher->setkey = skcipher_setkey_ablkcipher;
    skcipher->encrypt = skcipher_encrypt_ablkcipher;
    skcipher->decrypt = skcipher_decrypt_ablkcipher;

    skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
    skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                sizeof(struct ablkcipher_request);
    skcipher->keysize = calg->cra_ablkcipher.max_keysize;

    return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

    alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
    struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

    if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
        return crypto_init_skcipher_ops_blkcipher(tfm);

    if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
        tfm->__crt_alg->cra_type == &crypto_givcipher_type)
        return crypto_init_skcipher_ops_ablkcipher(tfm);

    skcipher->setkey = alg->setkey;
    skcipher->encrypt = alg->encrypt;
    skcipher->decrypt = alg->decrypt;
    skcipher->ivsize = alg->ivsize;
    skcipher->keysize = alg->max_keysize;

    if (alg->exit)
        skcipher->base.exit = crypto_skcipher_exit_tfm;

    if (alg->init)
        return alg->init(skcipher);

    return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
    struct skcipher_instance *skcipher =
        container_of(inst, struct skcipher_instance, s.base);

    skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
    struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                             base);

    seq_printf(m, "type         : skcipher\n");
    seq_printf(m, "async        : %s\n",
           alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
    seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
    seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
    seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    struct crypto_report_blkcipher rblkcipher;
    struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                             base);

    strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
    strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

    rblkcipher.blocksize = alg->cra_blocksize;
    rblkcipher.min_keysize = skcipher->min_keysize;
    rblkcipher.max_keysize = skcipher->max_keysize;
    rblkcipher.ivsize = skcipher->ivsize;

    if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
            sizeof(struct crypto_report_blkcipher), &rblkcipher))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
    .extsize = crypto_skcipher_extsize,
    .init_tfm = crypto_skcipher_init_tfm,
    .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
    .show = crypto_skcipher_show,
#endif
    .report = crypto_skcipher_report,
    .maskclear = ~CRYPTO_ALG_TYPE_MASK,
    .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
    .type = CRYPTO_ALG_TYPE_SKCIPHER,
    .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
              const char *name, u32 type, u32 mask)
{
    spawn->base.frontend = &crypto_skcipher_type2;
    return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                          u32 type, u32 mask)
{
    return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
    return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
    struct crypto_alg *base = &alg->base;

    if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
        return -EINVAL;

    if (!alg->chunksize)
        alg->chunksize = base->cra_blocksize;

    base->cra_type = &crypto_skcipher_type2;
    base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
    base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

    return 0;
}

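/*
 * Register a single skcipher algorithm.  A minimal, purely illustrative
 * sketch of a registration (all "example_*" names are hypothetical and not
 * part of this file):
 *
 *     static struct skcipher_alg example_alg = {
 *         .base.cra_name          = "example-cipher",
 *         .base.cra_driver_name   = "example-cipher-generic",
 *         .base.cra_priority      = 100,
 *         .base.cra_blocksize     = 16,
 *         .base.cra_ctxsize       = sizeof(struct example_ctx),
 *         .base.cra_module        = THIS_MODULE,
 *         .min_keysize            = 16,
 *         .max_keysize            = 32,
 *         .ivsize                 = 16,
 *         .setkey                 = example_setkey,
 *         .encrypt                = example_encrypt,
 *         .decrypt                = example_decrypt,
 *     };
 *
 *     err = crypto_register_skcipher(&example_alg);
 */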
int crypto_register_skcipher(struct skcipher_alg *alg)
{
    struct crypto_alg *base = &alg->base;
    int err;

    err = skcipher_prepare_alg(alg);
    if (err)
        return err;

    return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
    crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
    int i, ret;

    for (i = 0; i < count; i++) {
        ret = crypto_register_skcipher(&algs[i]);
        if (ret)
            goto err;
    }

    return 0;

err:
    for (--i; i >= 0; --i)
        crypto_unregister_skcipher(&algs[i]);

    return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
    int i;

    for (i = count - 1; i >= 0; --i)
        crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
               struct skcipher_instance *inst)
{
    int err;

    err = skcipher_prepare_alg(&inst->alg);
    if (err)
        return err;

    return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");