Back to home page

LXR

 
 

    


0001 /*
0002  * Synchronous Compression operations
0003  *
0004  * Copyright 2015 LG Electronics Inc.
0005  * Copyright (c) 2016, Intel Corporation
0006  * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
0007  *
0008  * This program is free software; you can redistribute it and/or modify it
0009  * under the terms of the GNU General Public License as published by the Free
0010  * Software Foundation; either version 2 of the License, or (at your option)
0011  * any later version.
0012  *
0013  */
0014 #include <linux/errno.h>
0015 #include <linux/kernel.h>
0016 #include <linux/module.h>
0017 #include <linux/seq_file.h>
0018 #include <linux/slab.h>
0019 #include <linux/string.h>
0020 #include <linux/crypto.h>
0021 #include <linux/vmalloc.h>
0022 #include <crypto/algapi.h>
0023 #include <linux/cryptouser.h>
0024 #include <net/netlink.h>
0025 #include <linux/scatterlist.h>
0026 #include <crypto/scatterwalk.h>
0027 #include <crypto/internal/acompress.h>
0028 #include <crypto/internal/scompress.h>
0029 #include "internal.h"
0030 
static const struct crypto_type crypto_scomp_type;
/*
 * Per-CPU scratch buffers used to linearise the source and destination
 * scatterlists before handing flat buffers to the scomp algorithm.
 */
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
/* Number of registered scomp algorithms sharing the scratch buffers. */
static int scomp_scratch_users;
/* Serialises scratch allocation/free during (un)registration. */
static DEFINE_MUTEX(scomp_lock);
0036 
0037 #ifdef CONFIG_NET
0038 static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
0039 {
0040     struct crypto_report_comp rscomp;
0041 
0042     strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
0043 
0044     if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
0045             sizeof(struct crypto_report_comp), &rscomp))
0046         goto nla_put_failure;
0047     return 0;
0048 
0049 nla_put_failure:
0050     return -EMSGSIZE;
0051 }
0052 #else
0053 static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
0054 {
0055     return -ENOSYS;
0056 }
0057 #endif
0058 
/*
 * Forward declaration marked unused: the function is only referenced from
 * crypto_scomp_type when CONFIG_PROC_FS is enabled, so without the
 * attribute a !CONFIG_PROC_FS build would warn about a defined-but-unused
 * static function.
 */
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
    __attribute__ ((unused));

/* /proc/crypto line identifying this algorithm type as "scomp". */
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
    seq_puts(m, "type         : scomp\n");
}
0066 
/*
 * Per-transform init hook for the scomp type.  scomp transforms keep all
 * state in per-request/per-context allocations, so there is nothing to
 * set up here; always succeeds.
 */
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
    return 0;
}
0071 
0072 static void crypto_scomp_free_scratches(void * __percpu *scratches)
0073 {
0074     int i;
0075 
0076     if (!scratches)
0077         return;
0078 
0079     for_each_possible_cpu(i)
0080         vfree(*per_cpu_ptr(scratches, i));
0081 
0082     free_percpu(scratches);
0083 }
0084 
0085 static void * __percpu *crypto_scomp_alloc_scratches(void)
0086 {
0087     void * __percpu *scratches;
0088     int i;
0089 
0090     scratches = alloc_percpu(void *);
0091     if (!scratches)
0092         return NULL;
0093 
0094     for_each_possible_cpu(i) {
0095         void *scratch;
0096 
0097         scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
0098         if (!scratch)
0099             goto error;
0100         *per_cpu_ptr(scratches, i) = scratch;
0101     }
0102 
0103     return scratches;
0104 
0105 error:
0106     crypto_scomp_free_scratches(scratches);
0107     return NULL;
0108 }
0109 
0110 static void crypto_scomp_free_all_scratches(void)
0111 {
0112     if (!--scomp_scratch_users) {
0113         crypto_scomp_free_scratches(scomp_src_scratches);
0114         crypto_scomp_free_scratches(scomp_dst_scratches);
0115         scomp_src_scratches = NULL;
0116         scomp_dst_scratches = NULL;
0117     }
0118 }
0119 
0120 static int crypto_scomp_alloc_all_scratches(void)
0121 {
0122     if (!scomp_scratch_users++) {
0123         scomp_src_scratches = crypto_scomp_alloc_scratches();
0124         if (!scomp_src_scratches)
0125             return -ENOMEM;
0126         scomp_dst_scratches = crypto_scomp_alloc_scratches();
0127         if (!scomp_dst_scratches)
0128             return -ENOMEM;
0129     }
0130     return 0;
0131 }
0132 
0133 static void crypto_scomp_sg_free(struct scatterlist *sgl)
0134 {
0135     int i, n;
0136     struct page *page;
0137 
0138     if (!sgl)
0139         return;
0140 
0141     n = sg_nents(sgl);
0142     for_each_sg(sgl, sgl, n, i) {
0143         page = sg_page(sgl);
0144         if (page)
0145             __free_page(page);
0146     }
0147 
0148     kfree(sgl);
0149 }
0150 
0151 static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
0152 {
0153     struct scatterlist *sgl;
0154     struct page *page;
0155     int i, n;
0156 
0157     n = ((size - 1) >> PAGE_SHIFT) + 1;
0158 
0159     sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
0160     if (!sgl)
0161         return NULL;
0162 
0163     sg_init_table(sgl, n);
0164 
0165     for (i = 0; i < n; i++) {
0166         page = alloc_page(gfp);
0167         if (!page)
0168             goto err;
0169         sg_set_page(sgl + i, page, PAGE_SIZE, 0);
0170     }
0171 
0172     return sgl;
0173 
0174 err:
0175     sg_mark_end(sgl + i);
0176     crypto_scomp_sg_free(sgl);
0177     return NULL;
0178 }
0179 
0180 static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
0181 {
0182     struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
0183     void **tfm_ctx = acomp_tfm_ctx(tfm);
0184     struct crypto_scomp *scomp = *tfm_ctx;
0185     void **ctx = acomp_request_ctx(req);
0186     const int cpu = get_cpu();
0187     u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
0188     u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
0189     int ret;
0190 
0191     if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
0192         ret = -EINVAL;
0193         goto out;
0194     }
0195 
0196     if (req->dst && !req->dlen) {
0197         ret = -EINVAL;
0198         goto out;
0199     }
0200 
0201     if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
0202         req->dlen = SCOMP_SCRATCH_SIZE;
0203 
0204     scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
0205     if (dir)
0206         ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
0207                         scratch_dst, &req->dlen, *ctx);
0208     else
0209         ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
0210                           scratch_dst, &req->dlen, *ctx);
0211     if (!ret) {
0212         if (!req->dst) {
0213             req->dst = crypto_scomp_sg_alloc(req->dlen,
0214                    req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
0215                    GFP_KERNEL : GFP_ATOMIC);
0216             if (!req->dst)
0217                 goto out;
0218         }
0219         scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
0220                      1);
0221     }
0222 out:
0223     put_cpu();
0224     return ret;
0225 }
0226 
/* acomp ->compress hook: forward to the common path in compress mode. */
static int scomp_acomp_compress(struct acomp_req *req)
{
    return scomp_acomp_comp_decomp(req, 1);
}
0231 
/* acomp ->decompress hook: forward to the common path in decompress mode. */
static int scomp_acomp_decompress(struct acomp_req *req)
{
    return scomp_acomp_comp_decomp(req, 0);
}
0236 
/*
 * Teardown for an acomp tfm backed by a scomp algorithm: release the
 * scomp transform stored in the tfm context by
 * crypto_init_scomp_ops_async().
 */
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
    struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

    crypto_free_scomp(*ctx);
}
0243 
0244 int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
0245 {
0246     struct crypto_alg *calg = tfm->__crt_alg;
0247     struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
0248     struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
0249     struct crypto_scomp *scomp;
0250 
0251     if (!crypto_mod_get(calg))
0252         return -EAGAIN;
0253 
0254     scomp = crypto_create_tfm(calg, &crypto_scomp_type);
0255     if (IS_ERR(scomp)) {
0256         crypto_mod_put(calg);
0257         return PTR_ERR(scomp);
0258     }
0259 
0260     *ctx = scomp;
0261     tfm->exit = crypto_exit_scomp_ops_async;
0262 
0263     crt->compress = scomp_acomp_compress;
0264     crt->decompress = scomp_acomp_decompress;
0265     crt->dst_free = crypto_scomp_sg_free;
0266     crt->reqsize = sizeof(void *);
0267 
0268     return 0;
0269 }
0270 
0271 struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
0272 {
0273     struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
0274     struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
0275     struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
0276     struct crypto_scomp *scomp = *tfm_ctx;
0277     void *ctx;
0278 
0279     ctx = crypto_scomp_alloc_ctx(scomp);
0280     if (IS_ERR(ctx)) {
0281         kfree(req);
0282         return NULL;
0283     }
0284 
0285     *req->__ctx = ctx;
0286 
0287     return req;
0288 }
0289 
0290 void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
0291 {
0292     struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
0293     struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
0294     struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
0295     struct crypto_scomp *scomp = *tfm_ctx;
0296     void *ctx = *req->__ctx;
0297 
0298     if (ctx)
0299         crypto_scomp_free_ctx(scomp, ctx);
0300 }
0301 
/* Type descriptor registered for every scomp algorithm. */
static const struct crypto_type crypto_scomp_type = {
    .extsize = crypto_alg_extsize,
    .init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
    .show = crypto_scomp_show,
#endif
    .report = crypto_scomp_report,
    .maskclear = ~CRYPTO_ALG_TYPE_MASK,
    .maskset = CRYPTO_ALG_TYPE_MASK,
    .type = CRYPTO_ALG_TYPE_SCOMPRESS,
    .tfmsize = offsetof(struct crypto_scomp, base),
};
0314 
0315 int crypto_register_scomp(struct scomp_alg *alg)
0316 {
0317     struct crypto_alg *base = &alg->base;
0318     int ret = -ENOMEM;
0319 
0320     mutex_lock(&scomp_lock);
0321     if (crypto_scomp_alloc_all_scratches())
0322         goto error;
0323 
0324     base->cra_type = &crypto_scomp_type;
0325     base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
0326     base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
0327 
0328     ret = crypto_register_alg(base);
0329     if (ret)
0330         goto error;
0331 
0332     mutex_unlock(&scomp_lock);
0333     return ret;
0334 
0335 error:
0336     crypto_scomp_free_all_scratches();
0337     mutex_unlock(&scomp_lock);
0338     return ret;
0339 }
0340 EXPORT_SYMBOL_GPL(crypto_register_scomp);
0341 
0342 int crypto_unregister_scomp(struct scomp_alg *alg)
0343 {
0344     int ret;
0345 
0346     mutex_lock(&scomp_lock);
0347     ret = crypto_unregister_alg(&alg->base);
0348     crypto_scomp_free_all_scratches();
0349     mutex_unlock(&scomp_lock);
0350 
0351     return ret;
0352 }
0353 EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
0354 
0355 MODULE_LICENSE("GPL");
0356 MODULE_DESCRIPTION("Synchronous compression type");