// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore a cmd list must be maintained to
 * ensure the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);
struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};

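/* A submission counts as successful if the CCP accepted the cmd outright
 * (0), is processing it (-EINPROGRESS), or backlogged it (-EBUSY).
 */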
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

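/* Remove a completed cmd from the request queue.  Returns the next cmd
 * queued for the same tfm, if any (it can now be submitted to the CCP),
 * and reports the next backlogged cmd to notify through @backlog.
 */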
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

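/* Callback the CCP driver invokes as a cmd changes state: err is
 * -EINPROGRESS when the cmd starts running and the final status once
 * the operation completes.
 */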
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

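/* Queue a cmd on the request queue and, when no earlier cmd for the same
 * tfm is already pending, submit it to the CCP immediately.
 */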
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the request completion callback, at which point the req
	 * pointer might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

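/* A typical caller is one of the CCP algorithm implementations, which
 * fills in a ccp_cmd from its request context and hands it off here.
 * Illustrative sketch only - the rctx layout below is hypothetical,
 * not the exact layout any particular CCP algorithm uses:
 *
 *	struct ccp_cmd *cmd = &rctx->cmd;
 *
 *	memset(cmd, 0, sizeof(*cmd));
 *	cmd->engine = CCP_ENGINE_AES;
 *	... fill in the engine-specific parameters ...
 *
 *	return ccp_crypto_enqueue_request(&req->base, cmd);
 *
 * The return value follows crypto API async conventions: -EINPROGRESS
 * (submitted), -EBUSY (backlogged) or a negative error code.
 */
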
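/* Copy the entries of @sg_add into the first unused (pageless) slots of
 * @table->sgl, returning the last scatterlist entry that was filled in.
 */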
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

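/* Register every CCP-backed algorithm type that has not been disabled
 * via its module parameter.
 */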
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}

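/* Unregister and free all algorithms that were successfully registered */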
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}

static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);