/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

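/*
 * State saved while an ahash request is temporarily rewritten so that the
 * digest can be produced in an aligned bounce buffer (see ahash_save_req()
 * below): the caller's original result pointer, completion callback and
 * callback data, plus the aligned buffer itself.
 */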
struct ahash_request_priv {
    crypto_completion_t complete;
    void *data;
    u8 *result;
    void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
    return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                halg);
}

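/*
 * Map the page currently pointed to by the walk and return how many bytes
 * of it may be processed.  The chunk is clipped to the end of the page and,
 * for unaligned offsets, to the next alignment boundary so that subsequent
 * chunks start aligned.
 */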
static int hash_walk_next(struct crypto_hash_walk *walk)
{
    unsigned int alignmask = walk->alignmask;
    unsigned int offset = walk->offset;
    unsigned int nbytes = min(walk->entrylen,
                  ((unsigned int)(PAGE_SIZE)) - offset);

    if (walk->flags & CRYPTO_ALG_ASYNC)
        walk->data = kmap(walk->pg);
    else
        walk->data = kmap_atomic(walk->pg);
    walk->data += offset;

    if (offset & alignmask) {
        unsigned int unaligned = alignmask + 1 - (offset & alignmask);

        if (nbytes > unaligned)
            nbytes = unaligned;
    }

    walk->entrylen -= nbytes;
    return nbytes;
}

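/*
 * Start walking a new scatterlist entry: record its page, in-page offset and
 * length (clipped to the number of bytes left in the request), then map the
 * first chunk via hash_walk_next().
 */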
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
    struct scatterlist *sg;

    sg = walk->sg;
    walk->offset = sg->offset;
    walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
    walk->offset = offset_in_page(walk->offset);
    walk->entrylen = sg->length;

    if (walk->entrylen > walk->total)
        walk->entrylen = walk->total;
    walk->total -= walk->entrylen;

    return hash_walk_next(walk);
}

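/*
 * Finish processing of the current chunk and advance the walk.  @err is the
 * result of processing the previous chunk; on success the next chunk of the
 * current entry, the next page, or the next scatterlist entry is mapped.
 * Returns the number of bytes available in the new chunk, 0 when the walk
 * is complete, or a negative error code.
 */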
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
    unsigned int alignmask = walk->alignmask;
    unsigned int nbytes = walk->entrylen;

    walk->data -= walk->offset;

    if (nbytes && walk->offset & alignmask && !err) {
        walk->offset = ALIGN(walk->offset, alignmask + 1);
        walk->data += walk->offset;

        nbytes = min(nbytes,
                 ((unsigned int)(PAGE_SIZE)) - walk->offset);
        walk->entrylen -= nbytes;

        return nbytes;
    }

    if (walk->flags & CRYPTO_ALG_ASYNC)
        kunmap(walk->pg);
    else {
        kunmap_atomic(walk->data);
        /*
         * The may-sleep test only makes sense for sync users;
         * async users don't need to sleep here anyway.
         */
        crypto_yield(walk->flags);
    }

    if (err)
        return err;

    if (nbytes) {
        walk->offset = 0;
        walk->pg++;
        return hash_walk_next(walk);
    }

    if (!walk->total)
        return 0;

    walk->sg = sg_next(walk->sg);

    return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

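/*
 * Begin walking the source scatterlist of @req.  Returns the size of the
 * first mapped chunk (0 if the request has no data).  A caller typically
 * loops over the data roughly like this (a sketch only; process() stands
 * for whatever per-chunk update the caller performs and is not defined
 * here):
 *
 *    for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *         nbytes = crypto_hash_walk_done(&walk, err))
 *            err = process(walk.data, nbytes);
 */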
int crypto_hash_walk_first(struct ahash_request *req,
               struct crypto_hash_walk *walk)
{
    walk->total = req->nbytes;

    if (!walk->total) {
        walk->entrylen = 0;
        return 0;
    }

    walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
    walk->sg = req->src;
    walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

    return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

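/*
 * Same as crypto_hash_walk_first(), but the walk is marked CRYPTO_ALG_ASYNC
 * so pages are mapped with kmap() instead of kmap_atomic(), allowing the
 * caller to sleep while a chunk is mapped.
 */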
int crypto_ahash_walk_first(struct ahash_request *req,
                struct crypto_hash_walk *walk)
{
    walk->total = req->nbytes;

    if (!walk->total) {
        walk->entrylen = 0;
        return 0;
    }

    walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
    walk->sg = req->src;
    walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
    walk->flags |= CRYPTO_ALG_ASYNC;

    BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

    return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

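/*
 * Slow path for crypto_ahash_setkey(): copy the key into a freshly allocated
 * buffer aligned to the algorithm's alignment mask before handing it to the
 * implementation's ->setkey().
 */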
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                unsigned int keylen)
{
    unsigned long alignmask = crypto_ahash_alignmask(tfm);
    int ret;
    u8 *buffer, *alignbuffer;
    unsigned long absize;

    absize = keylen + alignmask;
    buffer = kmalloc(absize, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;

    alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
    memcpy(alignbuffer, key, keylen);
    ret = tfm->setkey(tfm, alignbuffer, keylen);
    kzfree(buffer);
    return ret;
}

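/*
 * Set the key for a keyed hash.  If the caller's key buffer does not satisfy
 * the algorithm's alignment mask it is bounced through an aligned copy first.
 */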
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
            unsigned int keylen)
{
    unsigned long alignmask = crypto_ahash_alignmask(tfm);

    if ((unsigned long)key & alignmask)
        return ahash_setkey_unaligned(tfm, key, keylen);

    return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
              unsigned int keylen)
{
    return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                           unsigned long mask)
{
    return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

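/*
 * Prepare @req for an operation whose result buffer is not suitably aligned:
 * stash the caller's result pointer and completion callback in a private
 * structure, then point the request at an aligned bounce buffer and at @cplt
 * as the completion handler.  Undone by ahash_restore_req().
 */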
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    unsigned long alignmask = crypto_ahash_alignmask(tfm);
    unsigned int ds = crypto_ahash_digestsize(tfm);
    struct ahash_request_priv *priv;

    priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
               (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
               GFP_KERNEL : GFP_ATOMIC);
    if (!priv)
        return -ENOMEM;

    /*
     * WARNING: Voodoo programming below!
     *
     * The code below is obscure and hard to understand, thus explanation
     * is necessary. See include/crypto/hash.h and include/linux/crypto.h
     * to understand the layout of structures used here!
     *
     * The code here will replace portions of the ORIGINAL request with
     * pointers to new code and buffers so the hashing operation can store
     * the result in an aligned buffer. We will call the modified request
     * an ADJUSTED request.
     *
     * The newly mangled request will look like this:
     *
     * req {
     *   .result        = ADJUSTED[new aligned buffer]
     *   .base.complete = ADJUSTED[pointer to completion function]
     *   .base.data     = ADJUSTED[*req (pointer to self)]
     *   .priv          = ADJUSTED[new priv] {
     *           .result   = ORIGINAL(result)
     *           .complete = ORIGINAL(base.complete)
     *           .data     = ORIGINAL(base.data)
     *   }
     * }
     */

    priv->result = req->result;
    priv->complete = req->base.complete;
    priv->data = req->base.data;
    /*
     * WARNING: We do not backup req->priv here! The req->priv
     *          is for internal use of the Crypto API and the
     *          user must _NOT_ _EVER_ depend on its content!
     */

    req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
    req->base.complete = cplt;
    req->base.data = req;
    req->priv = priv;

    return 0;
}

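/*
 * Undo ahash_save_req(): put the caller's result pointer and completion
 * callback back into @req and free the private bounce state.
 */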
static void ahash_restore_req(struct ahash_request *req)
{
    struct ahash_request_priv *priv = req->priv;

    /* Restore the original crypto request. */
    req->result = priv->result;
    req->base.complete = priv->complete;
    req->base.data = priv->data;
    req->priv = NULL;

    /* Free the req->priv.priv from the ADJUSTED request. */
    kzfree(priv);
}

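/*
 * Completion tail for an unaligned-result operation: copy the digest from
 * the aligned bounce buffer back into the caller's original result buffer
 * and restore the request.  Nothing is done while the operation is still
 * in flight (-EINPROGRESS).
 */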
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
    struct ahash_request_priv *priv = req->priv;

    if (err == -EINPROGRESS)
        return;

    if (!err)
        memcpy(priv->result, req->result,
               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

    ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;

    /*
     * Restore the original request, see ahash_op_unaligned() for what
     * goes where.
     *
     * The "struct ahash_request *req" here is in fact the "req.base"
     * from the ADJUSTED request from ahash_op_unaligned(), thus as it
     * is a pointer to self, it is also the ADJUSTED "req".
     */

    /* First copy req->result into req->priv.result */
    ahash_op_unaligned_finish(areq, err);

    /* Complete the ORIGINAL request. */
    areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                  int (*op)(struct ahash_request *))
{
    int err;

    err = ahash_save_req(req, ahash_op_unaligned_done);
    if (err)
        return err;

    err = op(req);
    ahash_op_unaligned_finish(req, err);

    return err;
}

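/*
 * Common entry point for final/finup/digest: if the caller's result buffer
 * meets the algorithm's alignment requirement, call @op directly; otherwise
 * route the request through the bounce-buffer machinery above.
 */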
static int crypto_ahash_op(struct ahash_request *req,
               int (*op)(struct ahash_request *))
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    unsigned long alignmask = crypto_ahash_alignmask(tfm);

    if ((unsigned long)req->result & alignmask)
        return ahash_op_unaligned(req, op);

    return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
    return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
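
/*
 * Typical caller-side use of the one-shot digest interface (a sketch only;
 * the algorithm name "sha256", the scatterlist sg, the output buffer digest,
 * the byte count nbytes and the completion pair my_done/my_wait are all
 * caller-supplied and not defined in this file):
 *
 *    struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *    struct ahash_request *req;
 *    int err;
 *
 *    if (IS_ERR(tfm))
 *            return PTR_ERR(tfm);
 *    req = ahash_request_alloc(tfm, GFP_KERNEL);
 *    if (!req)
 *            goto out_free_tfm;
 *    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                               my_done, &my_wait);
 *    ahash_request_set_crypt(req, sg, digest, nbytes);
 *    err = crypto_ahash_digest(req);
 *    if (err == -EINPROGRESS || err == -EBUSY)
 *            ... wait for my_done() to report the final status ...
 */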

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
    struct ahash_request_priv *priv = req->priv;

    if (err == -EINPROGRESS)
        return;

    if (!err)
        memcpy(priv->result, req->result,
               crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

    ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;

    ahash_def_finup_finish2(areq, err);

    areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
    if (err)
        goto out;

    req->base.complete = ahash_def_finup_done2;
    req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
    err = crypto_ahash_reqtfm(req)->final(req);

out:
    ahash_def_finup_finish2(req, err);
    return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
    struct ahash_request *areq = req->data;

    err = ahash_def_finup_finish1(areq, err);

    areq->base.complete(&areq->base, err);
}

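/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final(): run the two steps back to back, using the save/restore
 * machinery above so that the chained completion callbacks can resume at
 * the right step when the implementation completes asynchronously.
 */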
static int ahash_def_finup(struct ahash_request *req)
{
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    int err;

    err = ahash_save_req(req, ahash_def_finup_done1);
    if (err)
        return err;

    err = tfm->update(req);
    return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
    return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
    return -ENOSYS;
}

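/*
 * Initialise a freshly allocated ahash transform: install default stubs,
 * wrap a synchronous shash implementation if that is what backs the
 * algorithm, and otherwise copy the operations from the ahash_alg.
 */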
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
    struct ahash_alg *alg = crypto_ahash_alg(hash);

    hash->setkey = ahash_nosetkey;
    hash->has_setkey = false;
    hash->export = ahash_no_export;
    hash->import = ahash_no_import;

    if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
        return crypto_init_shash_ops_async(tfm);

    hash->init = alg->init;
    hash->update = alg->update;
    hash->final = alg->final;
    hash->finup = alg->finup ?: ahash_def_finup;
    hash->digest = alg->digest;

    if (alg->setkey) {
        hash->setkey = alg->setkey;
        hash->has_setkey = true;
    }
    if (alg->export)
        hash->export = alg->export;
    if (alg->import)
        hash->import = alg->import;

    return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
    if (alg->cra_type != &crypto_ahash_type)
        return sizeof(struct crypto_shash *);

    return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    struct crypto_report_hash rhash;

    strncpy(rhash.type, "ahash", sizeof(rhash.type));

    rhash.blocksize = alg->cra_blocksize;
    rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

    if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
            sizeof(struct crypto_report_hash), &rhash))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
    return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
    seq_printf(m, "type         : ahash\n");
    seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                         "yes" : "no");
    seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
    seq_printf(m, "digestsize   : %u\n",
           __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
    .extsize = crypto_ahash_extsize,
    .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
    .show = crypto_ahash_show,
#endif
    .report = crypto_ahash_report,
    .maskclear = ~CRYPTO_ALG_TYPE_MASK,
    .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
    .type = CRYPTO_ALG_TYPE_AHASH,
    .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                    u32 mask)
{
    return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
    return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

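/*
 * Sanity-check and normalise an ahash_alg prior to registration: reject
 * implausible digest/state sizes and force the algorithm type flags to
 * CRYPTO_ALG_TYPE_AHASH.
 */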
static int ahash_prepare_alg(struct ahash_alg *alg)
{
    struct crypto_alg *base = &alg->halg.base;

    if (alg->halg.digestsize > PAGE_SIZE / 8 ||
        alg->halg.statesize > PAGE_SIZE / 8 ||
        alg->halg.statesize == 0)
        return -EINVAL;

    base->cra_type = &crypto_ahash_type;
    base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
    base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

    return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
    struct crypto_alg *base = &alg->halg.base;
    int err;

    err = ahash_prepare_alg(alg);
    if (err)
        return err;

    return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
    return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                struct ahash_instance *inst)
{
    int err;

    err = ahash_prepare_alg(&inst->alg);
    if (err)
        return err;

    return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
    crypto_drop_spawn(crypto_instance_ctx(inst));
    kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                struct hash_alg_common *alg,
                struct crypto_instance *inst)
{
    return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
    struct crypto_alg *alg;

    alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
    return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");