// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static inline struct akcipher_request *akcipher_request_cast(
    struct crypto_async_request *req)
{
    return container_of(req, struct akcipher_request, base);
}

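/*
 * Copy one component of an RSA key: skip any leading zero bytes, record
 * the remaining significant length, and keep a kmemdup()'d copy of it.
 * Returns -ENOMEM if the copy cannot be allocated.
 */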
static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
                        const u8 *buf, size_t sz)
{
    int nskip;

    for (nskip = 0; nskip < sz; nskip++)
        if (buf[nskip])
            break;
    *kplen = sz - nskip;
    *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
    if (!*kpbuf)
        return -ENOMEM;

    return 0;
}

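/*
 * Completion callback invoked when a queued RSA command finishes. On
 * success, report the output length as the key size in bytes (key_size
 * is tracked in bits).
 */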
static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
{
    struct akcipher_request *req = akcipher_request_cast(async_req);
    struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);

    if (ret)
        return ret;

    req->dst_len = rctx->cmd.u.rsa.key_size >> 3;

    return 0;
}

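/* The largest possible output is the byte length of the modulus n. */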
static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
    struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);

    return ctx->u.rsa.n_len;
}

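/*
 * Build a CCP RSA command from the akcipher request: the public exponent
 * e is used for encryption and the private exponent d for decryption,
 * together with the modulus n and the request's source/destination
 * scatterlists. The command is then queued on the CCP.
 */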
static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
{
    struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
    struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
    struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
    int ret = 0;

    memset(&rctx->cmd, 0, sizeof(rctx->cmd));
    INIT_LIST_HEAD(&rctx->cmd.entry);
    rctx->cmd.engine = CCP_ENGINE_RSA;

    rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
    if (encrypt) {
        rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
        rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
    } else {
        rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
        rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
    }
    rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
    rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
    rctx->cmd.u.rsa.src = req->src;
    rctx->cmd.u.rsa.src_len = req->src_len;
    rctx->cmd.u.rsa.dst = req->dst;

    ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

    return ret;
}

static int ccp_rsa_encrypt(struct akcipher_request *req)
{
    return ccp_rsa_crypt(req, true);
}

static int ccp_rsa_decrypt(struct akcipher_request *req)
{
    return ccp_rsa_crypt(req, false);
}

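/* Key lengths are expressed in bits; reject anything outside 8..4096. */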
static int ccp_check_key_length(unsigned int len)
{
    /* In bits */
    if (len < 8 || len > 4096)
        return -EINVAL;
    return 0;
}

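/*
 * Drop any previously saved key material. kfree_sensitive() zeroes the
 * buffers before freeing them.
 */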
static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
{
    /* Clean up old key data */
    kfree_sensitive(ctx->u.rsa.e_buf);
    ctx->u.rsa.e_buf = NULL;
    ctx->u.rsa.e_len = 0;
    kfree_sensitive(ctx->u.rsa.n_buf);
    ctx->u.rsa.n_buf = NULL;
    ctx->u.rsa.n_len = 0;
    kfree_sensitive(ctx->u.rsa.d_buf);
    ctx->u.rsa.d_buf = NULL;
    ctx->u.rsa.d_len = 0;
}

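/*
 * Parse the supplied key with the generic RSA helpers, validate the
 * modulus length, and keep zero-stripped local copies of n, e and (for
 * private keys) d, each wrapped in a single-entry scatterlist for the
 * CCP engine. Any previously installed key is released first.
 */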
static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
              unsigned int keylen, bool private)
{
    struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
    struct rsa_key raw_key;
    int ret;

    ccp_rsa_free_key_bufs(ctx);
    memset(&raw_key, 0, sizeof(raw_key));

    /* Code borrowed from crypto/rsa.c */
    if (private)
        ret = rsa_parse_priv_key(&raw_key, key, keylen);
    else
        ret = rsa_parse_pub_key(&raw_key, key, keylen);
    if (ret)
        goto n_key;

    ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
                    raw_key.n, raw_key.n_sz);
    if (ret)
        goto key_err;
    sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);

    ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
    if (ccp_check_key_length(ctx->u.rsa.key_len)) {
        ret = -EINVAL;
        goto key_err;
    }

    ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
                    raw_key.e, raw_key.e_sz);
    if (ret)
        goto key_err;
    sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);

    if (private) {
        ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
                        &ctx->u.rsa.d_len,
                        raw_key.d, raw_key.d_sz);
        if (ret)
            goto key_err;
        sg_init_one(&ctx->u.rsa.d_sg,
                ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
    }

    return 0;

key_err:
    ccp_rsa_free_key_bufs(ctx);

n_key:
    return ret;
}

static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
                  unsigned int keylen)
{
    return ccp_rsa_setkey(tfm, key, keylen, true);
}

static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
                 unsigned int keylen)
{
    return ccp_rsa_setkey(tfm, key, keylen, false);
}

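/*
 * Per-transform setup and teardown: reserve space for the request
 * context, install the completion callback, and free any saved key
 * material when the transform goes away.
 */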
static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
{
    struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);

    akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
    ctx->complete = ccp_rsa_complete;

    return 0;
}

static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
    struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);

    ccp_rsa_free_key_bufs(ctx);
}

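/*
 * Template akcipher algorithm. A copy is made and registered for each
 * matching entry in rsa_algs[] below.
 */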
static struct akcipher_alg ccp_rsa_defaults = {
    .encrypt = ccp_rsa_encrypt,
    .decrypt = ccp_rsa_decrypt,
    .set_pub_key = ccp_rsa_setpubkey,
    .set_priv_key = ccp_rsa_setprivkey,
    .max_size = ccp_rsa_maxsize,
    .init = ccp_rsa_init_tfm,
    .exit = ccp_rsa_exit_tfm,
    .base = {
        .cra_name = "rsa",
        .cra_driver_name = "rsa-ccp",
        .cra_priority = CCP_CRA_PRIORITY,
        .cra_module = THIS_MODULE,
        .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
    },
};

struct ccp_rsa_def {
    unsigned int version;
    const char *name;
    const char *driver_name;
    unsigned int reqsize;
    struct akcipher_alg *alg_defaults;
};

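/* Currently a single entry: plain "rsa", available on CCP v3 and later. */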
static struct ccp_rsa_def rsa_algs[] = {
    {
        .version    = CCP_VERSION(3, 0),
        .name       = "rsa",
        .driver_name    = "rsa-ccp",
        .reqsize    = sizeof(struct ccp_rsa_req_ctx),
        .alg_defaults   = &ccp_rsa_defaults,
    }
};

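/*
 * Allocate a wrapper around a copy of the default algorithm, give it the
 * requested names, register it with the crypto API and, on success, add
 * it to the caller's list so it can be unregistered later.
 */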
static int ccp_register_rsa_alg(struct list_head *head,
                    const struct ccp_rsa_def *def)
{
    struct ccp_crypto_akcipher_alg *ccp_alg;
    struct akcipher_alg *alg;
    int ret;

    ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
    if (!ccp_alg)
        return -ENOMEM;

    INIT_LIST_HEAD(&ccp_alg->entry);

    alg = &ccp_alg->alg;
    *alg = *def->alg_defaults;
    snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
    snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
         def->driver_name);
    ret = crypto_register_akcipher(alg);
    if (ret) {
        pr_err("%s akcipher algorithm registration error (%d)\n",
               alg->base.cra_name, ret);
        kfree(ccp_alg);
        return ret;
    }

    list_add(&ccp_alg->entry, head);

    return 0;
}

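/*
 * Register every table entry whose minimum CCP version is satisfied by
 * the detected hardware.
 */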
int ccp_register_rsa_algs(struct list_head *head)
{
    int i, ret;
    unsigned int ccpversion = ccp_version();

    /* Register the RSA algorithm in standard mode
     * This works for CCP v3 and later
     */
    for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
        if (rsa_algs[i].version > ccpversion)
            continue;
        ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
        if (ret)
            return ret;
    }

    return 0;
}
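
/*
 * Illustrative sketch, not part of this file: once registered, a kernel
 * caller would reach this implementation through the generic akcipher
 * API, e.g. crypto_alloc_akcipher("rsa", 0, 0); the "rsa-ccp" driver is
 * picked over a software "rsa" implementation when its cra_priority is
 * higher and the CCP hardware is present.
 */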