/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
    struct padata_instance *pinst;
    struct workqueue_struct *wq;

    /*
     * Cpumask for callback CPUs. It should be
     * equal to the serial cpumask of the corresponding padata instance,
     * so it is updated when padata notifies us about a serial
     * cpumask change.
     *
     * cb_cpumask is protected by RCU. This fact prevents us from
     * using cpumask_var_t directly because the actual type of
     * cpumask_var_t depends on the kernel configuration (particularly on
     * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
     * cpumask_var_t may be either a pointer to a struct cpumask
     * or a variable allocated on the stack. Thus we cannot safely use
     * cpumask_var_t with RCU operations such as rcu_assign_pointer or
     * rcu_dereference. So cpumask_var_t is wrapped in struct
     * pcrypt_cpumask, which makes it possible to use it with RCU.
     */
    struct pcrypt_cpumask {
        cpumask_var_t mask;
    } *cb_cpumask;
    struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
    struct crypto_aead_spawn spawn;
    atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
    struct crypto_aead *child;
    unsigned int cb_cpu;
};

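/*
 * Pick the CPU that will run the serialization callback: if the caller's
 * preferred cb_cpu is not in the current callback cpumask, map it onto the
 * mask by index (cb_cpu modulo the mask weight) and remember the choice,
 * then submit the request to padata for parallel processing.
 */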
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
                  struct padata_pcrypt *pcrypt)
{
    unsigned int cpu_index, cpu, i;
    struct pcrypt_cpumask *cpumask;

    cpu = *cb_cpu;

    rcu_read_lock_bh();
    cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
    if (cpumask_test_cpu(cpu, cpumask->mask))
        goto out;

    if (!cpumask_weight(cpumask->mask))
        goto out;

    cpu_index = cpu % cpumask_weight(cpumask->mask);

    cpu = cpumask_first(cpumask->mask);
    for (i = 0; i < cpu_index; i++)
        cpu = cpumask_next(cpu, cpumask->mask);

    *cb_cpu = cpu;

out:
    rcu_read_unlock_bh();
    return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

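/*
 * Key material and the authentication tag size are simply forwarded to
 * the wrapped (child) transform; pcrypt itself keeps no key state.
 */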
static int pcrypt_aead_setkey(struct crypto_aead *parent,
                  const u8 *key, unsigned int keylen)
{
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

    return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
                   unsigned int authsize)
{
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

    return crypto_aead_setauthsize(ctx->child, authsize);
}

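/*
 * Serialization callback: invoked by padata once the request can be
 * completed in its original submission order. Completes the user's
 * request with the status stored in padata->info.
 */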
static void pcrypt_aead_serial(struct padata_priv *padata)
{
    struct pcrypt_request *preq = pcrypt_padata_request(padata);
    struct aead_request *req = pcrypt_request_ctx(preq);

    aead_request_complete(req->base.data, padata->info);
}

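/*
 * Completion callback for a child request that ran asynchronously.
 * Records the status, clears CRYPTO_TFM_REQ_MAY_SLEEP (the remaining
 * completion work may run in a context that cannot sleep) and hands the
 * request back to padata for in-order completion.
 */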
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
    struct aead_request *req = areq->data;
    struct pcrypt_request *preq = aead_request_ctx(req);
    struct padata_priv *padata = pcrypt_request_padata(preq);

    padata->info = err;
    req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

    padata_do_serial(padata);
}

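/*
 * Parallel worker for encryption: runs the child encrypt on the parallel
 * CPU chosen by padata. If the child operates asynchronously
 * (-EINPROGRESS), serialization is deferred to pcrypt_aead_done;
 * otherwise the result is serialized immediately.
 */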
static void pcrypt_aead_enc(struct padata_priv *padata)
{
    struct pcrypt_request *preq = pcrypt_padata_request(padata);
    struct aead_request *req = pcrypt_request_ctx(preq);

    padata->info = crypto_aead_encrypt(req);

    if (padata->info == -EINPROGRESS)
        return;

    padata_do_serial(padata);
}

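/*
 * Encrypt entry point: builds the child request inside the pcrypt request
 * context and queues it on the pencrypt padata instance. A successful
 * submission is reported to the caller as -EINPROGRESS.
 */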
static int pcrypt_aead_encrypt(struct aead_request *req)
{
    int err;
    struct pcrypt_request *preq = aead_request_ctx(req);
    struct aead_request *creq = pcrypt_request_ctx(preq);
    struct padata_priv *padata = pcrypt_request_padata(preq);
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
    u32 flags = aead_request_flags(req);

    memset(padata, 0, sizeof(struct padata_priv));

    padata->parallel = pcrypt_aead_enc;
    padata->serial = pcrypt_aead_serial;

    aead_request_set_tfm(creq, ctx->child);
    aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                  pcrypt_aead_done, req);
    aead_request_set_crypt(creq, req->src, req->dst,
                   req->cryptlen, req->iv);
    aead_request_set_ad(creq, req->assoclen);

    err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
    if (!err)
        return -EINPROGRESS;

    return err;
}

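/*
 * The decrypt path mirrors the encrypt path above, but queues work on
 * the separate pdecrypt padata instance.
 */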
static void pcrypt_aead_dec(struct padata_priv *padata)
{
    struct pcrypt_request *preq = pcrypt_padata_request(padata);
    struct aead_request *req = pcrypt_request_ctx(preq);

    padata->info = crypto_aead_decrypt(req);

    if (padata->info == -EINPROGRESS)
        return;

    padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
    int err;
    struct pcrypt_request *preq = aead_request_ctx(req);
    struct aead_request *creq = pcrypt_request_ctx(preq);
    struct padata_priv *padata = pcrypt_request_padata(preq);
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
    u32 flags = aead_request_flags(req);

    memset(padata, 0, sizeof(struct padata_priv));

    padata->parallel = pcrypt_aead_dec;
    padata->serial = pcrypt_aead_serial;

    aead_request_set_tfm(creq, ctx->child);
    aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                  pcrypt_aead_done, req);
    aead_request_set_crypt(creq, req->src, req->dst,
                   req->cryptlen, req->iv);
    aead_request_set_ad(creq, req->assoclen);

    err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
    if (!err)
        return -EINPROGRESS;

    return err;
}

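/*
 * Transform init: spread transforms round-robin over the online CPUs so
 * that different tfms get different callback CPUs, instantiate the child
 * AEAD, and size the request context to hold the pcrypt bookkeeping, the
 * child request and the child's own request context.
 */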
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
    int cpu, cpu_index;
    struct aead_instance *inst = aead_alg_instance(tfm);
    struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct crypto_aead *cipher;

    cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
            cpumask_weight(cpu_online_mask);

    ctx->cb_cpu = cpumask_first(cpu_online_mask);
    for (cpu = 0; cpu < cpu_index; cpu++)
        ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

    cipher = crypto_spawn_aead(&ictx->spawn);

    if (IS_ERR(cipher))
        return PTR_ERR(cipher);

    ctx->child = cipher;
    crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
                     sizeof(struct aead_request) +
                     crypto_aead_reqsize(cipher));

    return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
    struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    crypto_free_aead(ctx->child);
}

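/*
 * Fill in the generic algorithm fields: the instance keeps the wrapped
 * algorithm's cra_name but gets a "pcrypt(...)" driver name, and its
 * priority is raised by 100 so that lookups by name prefer the pcrypt
 * instance over the plain algorithm.
 */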
static int pcrypt_init_instance(struct crypto_instance *inst,
                struct crypto_alg *alg)
{
    if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
             "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
        return -ENAMETOOLONG;

    memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

    inst->alg.cra_priority = alg->cra_priority + 100;
    inst->alg.cra_blocksize = alg->cra_blocksize;
    inst->alg.cra_alignmask = alg->cra_alignmask;

    return 0;
}

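/*
 * Instantiate the template around a named AEAD algorithm: grab the
 * underlying algorithm as a spawn, copy its parameters, mark the new
 * instance as asynchronous and wire up the pcrypt operations.
 */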
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
                  u32 type, u32 mask)
{
    struct pcrypt_instance_ctx *ctx;
    struct crypto_attr_type *algt;
    struct aead_instance *inst;
    struct aead_alg *alg;
    const char *name;
    int err;

    algt = crypto_get_attr_type(tb);
    if (IS_ERR(algt))
        return PTR_ERR(algt);

    name = crypto_attr_alg_name(tb[1]);
    if (IS_ERR(name))
        return PTR_ERR(name);

    inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
    if (!inst)
        return -ENOMEM;

    ctx = aead_instance_ctx(inst);
    crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

    err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
    if (err)
        goto out_free_inst;

    alg = crypto_spawn_aead_alg(&ctx->spawn);
    err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
    if (err)
        goto out_drop_aead;

    inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

    inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
    inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

    inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

    inst->alg.init = pcrypt_aead_init_tfm;
    inst->alg.exit = pcrypt_aead_exit_tfm;

    inst->alg.setkey = pcrypt_aead_setkey;
    inst->alg.setauthsize = pcrypt_aead_setauthsize;
    inst->alg.encrypt = pcrypt_aead_encrypt;
    inst->alg.decrypt = pcrypt_aead_decrypt;

    err = aead_register_instance(tmpl, inst);
    if (err)
        goto out_drop_aead;

out:
    return err;

out_drop_aead:
    crypto_drop_aead(&ctx->spawn);
out_free_inst:
    kfree(inst);
    goto out;
}

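/*
 * Template entry point: only AEAD algorithms are supported by pcrypt.
 */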
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
    struct crypto_attr_type *algt;

    algt = crypto_get_attr_type(tb);
    if (IS_ERR(algt))
        return PTR_ERR(algt);

    switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
    case CRYPTO_ALG_TYPE_AEAD:
        return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
    }

    return -EINVAL;
}

static void pcrypt_free(struct crypto_instance *inst)
{
    struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

    crypto_drop_aead(&ctx->spawn);
    kfree(inst);
}

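/*
 * padata cpumask notifier: when the serial cpumask of the padata instance
 * changes, build a fresh pcrypt_cpumask wrapper, publish it with
 * rcu_assign_pointer() and free the old one after an RCU grace period.
 */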
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                    unsigned long val, void *data)
{
    struct padata_pcrypt *pcrypt;
    struct pcrypt_cpumask *new_mask, *old_mask;
    struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

    if (!(val & PADATA_CPU_SERIAL))
        return 0;

    pcrypt = container_of(self, struct padata_pcrypt, nblock);
    new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
    if (!new_mask)
        return -ENOMEM;
    if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
        kfree(new_mask);
        return -ENOMEM;
    }

    old_mask = pcrypt->cb_cpumask;

    cpumask_copy(new_mask->mask, cpumask->cbcpu);
    rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
    synchronize_rcu_bh();

    free_cpumask_var(old_mask->mask);
    kfree(old_mask);
    return 0;
}

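/*
 * Expose the padata instance under /sys/kernel/pcrypt/<name> so that its
 * parallel and serial cpumasks can be inspected and tuned from user space.
 */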
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
    int ret;

    pinst->kobj.kset = pcrypt_kset;
    ret = kobject_add(&pinst->kobj, NULL, name);
    if (!ret)
        kobject_uevent(&pinst->kobj, KOBJ_ADD);

    return ret;
}

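/*
 * Bring up one padata instance: allocate its workqueue and padata
 * context, seed the callback cpumask with the currently online CPUs,
 * register the cpumask change notifier and add the sysfs entry. CPU
 * hotplug is held off (get_online_cpus()) while this is set up.
 */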
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
                  const char *name)
{
    int ret = -ENOMEM;
    struct pcrypt_cpumask *mask;

    get_online_cpus();

    pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                     1, name);
    if (!pcrypt->wq)
        goto err;

    pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
    if (!pcrypt->pinst)
        goto err_destroy_workqueue;

    mask = kmalloc(sizeof(*mask), GFP_KERNEL);
    if (!mask)
        goto err_free_padata;
    if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
        kfree(mask);
        goto err_free_padata;
    }

    cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
    rcu_assign_pointer(pcrypt->cb_cpumask, mask);

    pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
    ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
    if (ret)
        goto err_free_cpumask;

    ret = pcrypt_sysfs_add(pcrypt->pinst, name);
    if (ret)
        goto err_unregister_notifier;

    put_online_cpus();

    return ret;

err_unregister_notifier:
    padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
    free_cpumask_var(mask->mask);
    kfree(mask);
err_free_padata:
    padata_free(pcrypt->pinst);
err_destroy_workqueue:
    destroy_workqueue(pcrypt->wq);
err:
    put_online_cpus();

    return ret;
}

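/*
 * Tear down one padata instance, undoing pcrypt_init_padata().
 */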
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
    free_cpumask_var(pcrypt->cb_cpumask->mask);
    kfree(pcrypt->cb_cpumask);

    padata_stop(pcrypt->pinst);
    padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
    destroy_workqueue(pcrypt->wq);
    padata_free(pcrypt->pinst);
}

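/*
 * The "pcrypt" template ties the pieces above together. As a usage
 * sketch (illustrative only, not part of this file): once the template
 * is registered, a parallelized AEAD can be requested through the
 * regular crypto API by wrapping an existing AEAD name, e.g.
 *
 *    struct crypto_aead *tfm;
 *
 *    tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
 *    if (IS_ERR(tfm))
 *        return PTR_ERR(tfm);
 *
 * Requests issued on such a tfm are spread over the parallel cpumask
 * and completed in submission order via the serial (callback) cpumask.
 */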
static struct crypto_template pcrypt_tmpl = {
    .name = "pcrypt",
    .create = pcrypt_create,
    .free = pcrypt_free,
    .module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
    int err = -ENOMEM;

    pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
    if (!pcrypt_kset)
        goto err;

    err = pcrypt_init_padata(&pencrypt, "pencrypt");
    if (err)
        goto err_unreg_kset;

    err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
    if (err)
        goto err_deinit_pencrypt;

    padata_start(pencrypt.pinst);
    padata_start(pdecrypt.pinst);

    return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
    pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
    kset_unregister(pcrypt_kset);
err:
    return err;
}

static void __exit pcrypt_exit(void)
{
    pcrypt_fini_padata(&pencrypt);
    pcrypt_fini_padata(&pdecrypt);

    kset_unregister(pcrypt_kset);
    crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");