// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
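
/*
 * The threshold is also writable at runtime through sysfs; assuming the
 * driver is built as qcrypto.ko (the module name is an assumption here):
 *
 *	echo 1024 > /sys/module/qcrypto/parameters/aes_sw_max_len
 *
 * would send AES-XTS requests of up to 1024 bytes to the software fallback
 * (see qce_skcipher_crypt()) and keep the engine for larger transfers.
 */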

static LIST_HEAD(skcipher_algs);

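/*
 * DMA completion callback: shut down the DMA channel, unmap the scatterlists
 * mapped in qce_skcipher_async_req_handle(), free the destination table, and
 * copy the updated counter/IV out of the result dump so chained requests
 * (CBC, CTR, XTS) continue from the correct state.
 */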
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

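/*
 * Prepare and kick off one request on the engine: count and map the source
 * and destination scatterlists, append the per-device result buffer to the
 * destination list so the engine can dump its status and updated IV there,
 * then hand both lists to the DMA layer and start the crypto block.
 */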
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int dst_nents, src_nents, ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries.\n");
		return rctx->dst_nents;
	}

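	/*
	 * One extra destination entry is reserved for the result buffer,
	 * which is appended to the destination table below.
	 */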
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
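		/*
		 * In-place operation: reuse the mapped destination list as
		 * the source, minus the trailing result-buffer entry, which
		 * only the destination side should cover.
		 */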
		rctx->src_sg = rctx->dst_sg;
		src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
			       rctx->dst_sg, dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * AES XTS key1 = key2 is not supported by the crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		/* not supported by the hardware; leave it to the fallback */
		break;
	default:
		return -EINVAL;
	}

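	/*
	 * Always key the fallback cipher as well: AES-192 runs entirely on
	 * it, and it must share the hardware key for the length-based
	 * fallback in qce_skcipher_crypt().
	 */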
	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys
	 * being the same for triple des algorithms. The
	 * verify_skcipher_des3_key does not check for all the
	 * below conditions. Return -ENOKEY in case any two keys
	 * are the same. Revisit to see if a fallback cipher
	 * is needed to handle this condition.
	 */
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

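/*
 * Common entry point for encrypt and decrypt: validate the request length,
 * decide between the hardware queue and the software fallback, then either
 * enqueue the request with the QCE core or forward it to the fallback
 * skcipher.
 */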
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* CE does not handle 0 length messages */
	if (!req->cryptlen)
		return 0;

	/*
	 * ECB and CBC algorithms require message lengths to be
	 * multiples of the block size.
	 */
	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
		if (!IS_ALIGNED(req->cryptlen, blocksize))
			return -EINVAL;

	/*
	 * Conditions for requesting a fallback cipher:
	 * - AES-192 (not supported by this version of the hardware)
	 * - AES-XTS request with len <= aes_sw_max_len (the engine is not
	 *   worthwhile for small requests)
	 * - AES-XTS request with len > QCE_SECTOR_SIZE that is not a
	 *   multiple of the sector size
	 */
	if (IS_AES(rctx->flags) &&
	    ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	     (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
				      (req->cryptlen > QCE_SECTOR_SIZE &&
				       req->cryptlen % QCE_SECTOR_SIZE))))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

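/*
 * AES transforms also allocate a software fallback, looked up by cra_name
 * with CRYPTO_ALG_NEED_FALLBACK, and extend the request size so a fallback
 * skcipher_request can live at the tail of the qce request context.
 */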
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
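		/*
		 * CTR is a stream cipher: a blocksize of 1 accepts any
		 * length, while chunksize advertises the underlying AES
		 * block granularity to the skcipher walk code.
		 */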
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};

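/*
 * A sketch of how these algorithms are reached (not part of this driver):
 * kernel users go through the generic skcipher API, e.g. for the
 * "xts(aes)" entry above, roughly:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, 2 * AES_KEYSIZE_256);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * With cra_priority 300, the qce implementation is preferred over the
 * generic software ciphers whenever the hardware is present.
 */

/*
 * Allocate one template per table entry, populate the generic skcipher alg
 * from the def, and register it with the crypto API. Registered templates
 * are kept on skcipher_algs so qce_skcipher_unregister() can tear them down.
 */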
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};