// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>

#include "ocs-hcu.h"

#define DRV_NAME		"keembay-ocs-hcu"

/* Flag marking a final request. */
#define REQ_FINAL		BIT(0)
/* Flag marking a HMAC request. */
#define REQ_FLAGS_HMAC		BIT(1)
/* Flag set when HW HMAC is being performed. */
#define REQ_FLAGS_HMAC_HW	BIT(2)
/* Flag set when SW HMAC is being performed. */
#define REQ_FLAGS_HMAC_SW	BIT(3)
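
/**
 * struct ocs_hcu_ctx - OCS HCU Transform context.
 * @engine_ctx:	 Crypto Engine context.
 * @hcu_dev:	 The HCU device used by the transformation.
 * @key:	 The key (used only for HMAC transformations).
 * @key_len:	 The length of the key.
 * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
 * @is_hmac_tfm: Whether or not this is a HMAC transformation.
 */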
struct ocs_hcu_ctx {
	struct crypto_engine_ctx engine_ctx;
	struct ocs_hcu_dev *hcu_dev;
	u8 key[SHA512_BLOCK_SIZE];
	size_t key_len;
	bool is_sm3_tfm;
	bool is_hmac_tfm;
};
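
/**
 * struct ocs_hcu_rctx - Context for the request.
 * @hcu_dev:	    OCS HCU device to be used to service the request.
 * @flags:	    Flags tracking request status.
 * @algo:	    Algorithm to use for the request.
 * @blk_sz:	    Block size of the transformation / request.
 * @dig_sz:	    Digest size of the transformation / request.
 * @dma_list:	    OCS DMA linked list.
 * @hash_ctx:	    OCS HCU hashing context.
 * @buffer:	    Buffer to store a partial block of data and SW HMAC
 *		    artifacts (ipad, opad, etc.).
 * @buf_cnt:	    Number of bytes currently stored in the buffer.
 * @buf_dma_addr:   The DMA address of @buffer (when mapped).
 * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
 * @sg:		    Head of the scatterlist entries containing data.
 * @sg_data_total:  Total data in the SG list at any time.
 * @sg_data_offset: Offset into the data of the current individual SG node.
 * @sg_dma_nents:   Number of SG entries mapped in @dma_list.
 */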
struct ocs_hcu_rctx {
	struct ocs_hcu_dev *hcu_dev;
	u32 flags;
	enum ocs_hcu_algo algo;
	size_t blk_sz;
	size_t dig_sz;
	struct ocs_hcu_dma_list *dma_list;
	struct ocs_hcu_hash_ctx hash_ctx;
	/*
	 * Buffer is double the block size, since it must be able to hold the
	 * SW HMAC intermediate artifacts:
	 * - ipad (1 block) plus a possible partial block of data;
	 * - opad (1 block) plus the digest of H(k ^ ipad || m).
	 */
	u8 buffer[2 * SHA512_BLOCK_SIZE];
	size_t buf_cnt;
	dma_addr_t buf_dma_addr;
	size_t buf_dma_count;
	struct scatterlist *sg;
	unsigned int sg_data_total;
	unsigned int sg_data_offset;
	unsigned int sg_dma_nents;
};
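
/**
 * struct ocs_hcu_drv - Driver data.
 * @dev_list:	The list of HCU devices.
 * @lock:	The lock protecting @dev_list.
 */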
struct ocs_hcu_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct ocs_hcu_drv ocs_hcu = {
	.dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
};
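
/*
 * Return the total amount of data in the request: the data already in the
 * context buffer plus the data still in the scatterlist.
 */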
static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
{
	return rctx->sg_data_total + rctx->buf_cnt;
}
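
/* Move the data remaining in the scatterlist into the context buffer. */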
static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
{
	size_t count;

	if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
		WARN(1, "%s: sg data does not fit in buffer\n", __func__);
		return -EINVAL;
	}

	while (rctx->sg_data_total) {
		if (!rctx->sg) {
			WARN(1, "%s: unexpected NULL sg\n", __func__);
			return -EINVAL;
		}
		/*
		 * If all the data in the current scatterlist node has been
		 * processed, move to the next one.
		 */
		if (rctx->sg_data_offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			rctx->sg_data_offset = 0;
			continue;
		}
		/*
		 * Copy as much as possible: the minimum between the data left
		 * in the current node and the total data left to process.
		 */
		count = min(rctx->sg->length - rctx->sg_data_offset,
			    rctx->sg_data_total);
		/* Copy from the scatterlist node into the context buffer. */
		scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
					 rctx->sg, rctx->sg_data_offset,
					 count, 0);
		rctx->sg_data_offset += count;
		rctx->sg_data_total -= count;
		rctx->buf_cnt += count;
	}

	return 0;
}

static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);

	/* If the HCU device for this tfm was previously set, re-use it. */
	if (tctx->hcu_dev)
		return tctx->hcu_dev;

	/*
	 * Otherwise, get the first HCU device available (there should be one
	 * and only one device) and cache it in the tfm context.
	 */
	spin_lock_bh(&ocs_hcu.lock);
	tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
						 struct ocs_hcu_dev,
						 list);
	spin_unlock_bh(&ocs_hcu.lock);

	return tctx->hcu_dev;
}
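
/* Free the OCS DMA linked list and unmap any DMA-mapped buffers. */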
static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
				    struct ocs_hcu_rctx *rctx)
{
	struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
	struct device *dev = hcu_dev->dev;

	/* Unmap rctx->buffer (if it was mapped). */
	if (rctx->buf_dma_count) {
		dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
				 DMA_TO_DEVICE);
		rctx->buf_dma_count = 0;
	}

	/* Unmap req->src (if it was mapped). */
	if (rctx->sg_dma_nents) {
		dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
		rctx->sg_dma_nents = 0;
	}

	/* Free the OCS DMA linked list (if it was allocated). */
	if (rctx->dma_list) {
		ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
		rctx->dma_list = NULL;
	}
}
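
/**
 * kmb_ocs_dma_prepare() - Map request data and build the OCS DMA linked list.
 * @req: The request to prepare the OCS DMA linked list for.
 *
 * DMA-map the request's scatterlist and the context buffer (if it holds any
 * data) and add both to an OCS DMA linked list, so that the data can be
 * fetched by the HCU engine via DMA.
 *
 * Return: 0 on success, negative error code otherwise.
 */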
static int kmb_ocs_dma_prepare(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	struct device *dev = rctx->hcu_dev->dev;
	unsigned int remainder = 0;
	unsigned int total;
	size_t nents;
	size_t count;
	int rc;
	int i;

	/* This function should be called only when there is data to process. */
	total = kmb_get_total_data(rctx);
	if (!total)
		return -EINVAL;

	/*
	 * If this is not a final request, the data passed to the HCU must be
	 * a multiple of the block size; compute the remainder bytes to be
	 * left out (they will be processed with a later request).
	 */
	if (!(rctx->flags & REQ_FINAL))
		remainder = total % rctx->blk_sz;

	/* Determine the number of scatterlist entries to process. */
	nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);

	/* If there are entries to process, map them. */
	if (nents) {
		rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
						DMA_TO_DEVICE);
		if (!rctx->sg_dma_nents) {
			dev_err(dev, "Failed to MAP SG\n");
			rc = -ENOMEM;
			goto cleanup;
		}
		/*
		 * The value returned by dma_map_sg() can be < nents; so update
		 * nents accordingly.
		 */
		nents = rctx->sg_dma_nents;
	}

	/*
	 * If the context buffer is not empty, map it and add an extra DMA
	 * list entry for it.
	 */
	if (rctx->buf_cnt) {
		rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
						    rctx->buf_cnt,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
			dev_err(dev, "Failed to map request context buffer\n");
			rc = -ENOMEM;
			goto cleanup;
		}
		rctx->buf_dma_count = rctx->buf_cnt;
		/* Account for the extra DMA list entry. */
		nents++;
	}

	/* Allocate the OCS DMA linked list. */
	rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
	if (!rctx->dma_list) {
		rc = -ENOMEM;
		goto cleanup;
	}

	/* Add the context buffer first (if it was DMA-mapped). */
	if (rctx->buf_dma_count) {
		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
					       rctx->buf_dma_addr,
					       rctx->buf_dma_count);
		if (rc)
			goto cleanup;
	}

	/* Add the scatterlist entries to be processed to the DMA list. */
	for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
		/*
		 * The number of bytes to add to the list entry is the minimum
		 * between:
		 * - the data left to process (total data minus remainder);
		 * - the data left in the current scatterlist entry.
		 */
		count = min(rctx->sg_data_total - remainder,
			    sg_dma_len(rctx->sg) - rctx->sg_data_offset);
		/*
		 * Do not create a zero-length DMA descriptor; skip zero-length
		 * scatterlist entries.
		 */
		if (count == 0)
			continue;
		/* Add the entry to the OCS DMA linked list. */
		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
					       rctx->dma_list,
					       rctx->sg->dma_address,
					       count);
		if (rc)
			goto cleanup;

		/* Update the amount of data remaining in the scatterlist. */
		rctx->sg_data_total -= count;

		/*
		 * If the remaining data is equal to the remainder (note: the
		 * 'less than' case should never happen in practice), we are
		 * done: update the offset and exit the loop.
		 */
		if (rctx->sg_data_total <= remainder) {
			WARN_ON(rctx->sg_data_total < remainder);
			rctx->sg_data_offset += count;
			break;
		}

		/*
		 * If we get here, we must process the next scatterlist entry;
		 * reset the offset within the entry to 0.
		 */
		rctx->sg_data_offset = 0;
	}

	return 0;
cleanup:
	dev_err(dev, "Failed to prepare DMA.\n");
	kmb_ocs_hcu_dma_cleanup(req, rctx);

	return rc;
}

static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

	/* Clear the context buffer, which may hold key-derived material. */
	memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
}

static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
{
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

	if (!hcu_dev)
		return -ENOENT;

	return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
}

static int prepare_ipad(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	int i;

	WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
	WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
	     "%s: HMAC_SW flag is not set\n", __func__);
	/*
	 * The key length must be equal to the block size. If the key is
	 * shorter, pad it with zeros (note: the key cannot be longer, since
	 * longer keys are hashed by kmb_ocs_hcu_setkey()).
	 */
	if (ctx->key_len > rctx->blk_sz) {
		WARN(1, "%s: Invalid key length in tfm context\n", __func__);
		return -EINVAL;
	}
	memzero_explicit(&ctx->key[ctx->key_len],
			 rctx->blk_sz - ctx->key_len);
	ctx->key_len = rctx->blk_sz;

	/*
	 * Prepare the ipad for SW-assisted HMAC. Since
	 *   HMAC(k, m) = H(k ^ opad || H(k ^ ipad || m)),
	 * 'k ^ ipad' is the first block to be hashed; 'k ^ opad' is computed
	 * when the request is finalized.
	 */
	for (i = 0; i < rctx->blk_sz; i++)
		rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
	rctx->buf_cnt = rctx->blk_sz;

	return 0;
}

static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
	int rc;
	int i;

	if (!hcu_dev) {
		rc = -ENOENT;
		goto error;
	}

	/*
	 * If the HW HMAC flag is set, perform HMAC entirely in hardware.
	 * Note: this flag implies that the request is final and has data to
	 * process.
	 */
	if (rctx->flags & REQ_FLAGS_HMAC_HW) {
		/* Map the input data into the OCS DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
				  rctx->dma_list, req->result, rctx->dig_sz);

		/* Unmap data and free the DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process the previous return code. */
		if (rc)
			goto error;

		goto done;
	}

	/* Handle the update-request case. */
	if (!(rctx->flags & REQ_FINAL)) {
		/* An update should always have input data. */
		if (!kmb_get_total_data(rctx))
			return -EINVAL;

		/* Map the input data into the OCS DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		/* Do the hashing step. */
		rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
					 rctx->dma_list);

		/* Unmap data and free the DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process the previous return code. */
		if (rc)
			goto error;

		/*
		 * Reset the request buffer count (the data in the buffer has
		 * just been processed).
		 */
		rctx->buf_cnt = 0;

		/*
		 * Move any remaining scatterlist data into the request buffer,
		 * so that it is processed with the next request. There is
		 * remaining data only when the total data was not a multiple
		 * of the block size.
		 */
		rc = flush_sg_to_ocs_buffer(rctx);
		if (rc)
			goto error;

		goto done;
	}

	/* If we get here, this is a final request. */

	/* If there is data to process, use finup. */
	if (kmb_get_total_data(rctx)) {
		/* Map the input data into the OCS DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		/* Do the hashing step. */
		rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
					rctx->dma_list,
					req->result, rctx->dig_sz);
		/* Unmap data and free the DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process the previous return code. */
		if (rc)
			goto error;

	} else {  /* Otherwise (no data to process), use final. */
		rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
					rctx->dig_sz);
		if (rc)
			goto error;
	}

	/*
	 * If we are finalizing a SW-assisted HMAC request, we just computed
	 * the result of: H(k ^ ipad || m).
	 *
	 * We now need to complete the HMAC calculation with the opad step,
	 * that is, compute H(k ^ opad || digest), where digest is the value
	 * we just obtained, i.e., H(k ^ ipad || m).
	 */
	if (rctx->flags & REQ_FLAGS_HMAC_SW) {
		/*
		 * Compute k ^ opad and store it in the request buffer (which
		 * is not used anymore at this point). Note: the key has
		 * already been padded / hashed by prepare_ipad(), so its
		 * length must equal the block size.
		 */
		WARN_ON(tctx->key_len != rctx->blk_sz);
		for (i = 0; i < rctx->blk_sz; i++)
			rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
		/* Now append the digest to the rest of the buffer. */
		for (i = 0; i < rctx->dig_sz; i++)
			rctx->buffer[rctx->blk_sz + i] = req->result[i];

		/* Hash the buffer to obtain the final HMAC. */
		rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
				    rctx->blk_sz + rctx->dig_sz, req->result,
				    rctx->dig_sz);
		if (rc)
			goto error;
	}

	/* Perform secure clean-up. */
	kmb_ocs_hcu_secure_cleanup(req);
done:
	crypto_finalize_hash_request(hcu_dev->engine, req, 0);

	return 0;

error:
	kmb_ocs_hcu_secure_cleanup(req);
	return rc;
}

static int kmb_ocs_hcu_init(struct ahash_request *req)
{
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);

	if (!hcu_dev)
		return -ENOENT;

	/* Initialize the entire request context to zero. */
	memset(rctx, 0, sizeof(*rctx));

	rctx->hcu_dev = hcu_dev;
	rctx->dig_sz = crypto_ahash_digestsize(tfm);

	switch (rctx->dig_sz) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
	case SHA224_DIGEST_SIZE:
		rctx->blk_sz = SHA224_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA224;
		break;
#endif
	case SHA256_DIGEST_SIZE:
		rctx->blk_sz = SHA256_BLOCK_SIZE;
		/*
		 * SHA256 and SM3 have the same digest size, so use the info
		 * in the tfm context to tell them apart.
		 */
		rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
					       OCS_HCU_ALGO_SHA256;
		break;
	case SHA384_DIGEST_SIZE:
		rctx->blk_sz = SHA384_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA384;
		break;
	case SHA512_DIGEST_SIZE:
		rctx->blk_sz = SHA512_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA512;
		break;
	default:
		return -EINVAL;
	}

	/* Initialize the intermediate hashing data. */
	ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);

	/* If this is an HMAC request, set the HMAC flag. */
	if (ctx->is_hmac_tfm)
		rctx->flags |= REQ_FLAGS_HMAC;

	return 0;
}

static int kmb_ocs_hcu_update(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	int rc;

	if (!req->nbytes)
		return 0;

	rctx->sg_data_total = req->nbytes;
	rctx->sg_data_offset = 0;
	rctx->sg = req->src;

	/*
	 * If we are doing HMAC, we must use SW-assisted HMAC, since HW HMAC
	 * does not support context switching (i.e., it can only be used with
	 * finup() or digest()).
	 */
	if (rctx->flags & REQ_FLAGS_HMAC &&
	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
		rctx->flags |= REQ_FLAGS_HMAC_SW;
		rc = prepare_ipad(req);
		if (rc)
			return rc;
	}

	/*
	 * If the remaining scatterlist data fits into the context buffer, just
	 * copy it there; it will be processed by a subsequent update() or
	 * final() / finup() call.
	 */
	if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
		return flush_sg_to_ocs_buffer(rctx);

	return kmb_ocs_hcu_handle_queue(req);
}
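
/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */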
static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	int rc;

	rctx->flags |= REQ_FINAL;

	/*
	 * If this is an HMAC request and we have not switched to SW-assisted
	 * HMAC yet, check whether HW HMAC can be used.
	 */
	if (rctx->flags & REQ_FLAGS_HMAC &&
	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
		/*
		 * If we get here, no data has been processed so far, so HW
		 * HMAC is possible; but only if there is some data to process
		 * (OCS HW HMAC does not support zero-length messages) and the
		 * key is no longer than OCS_HCU_HW_KEY_LEN bytes. Otherwise,
		 * fall back to SW-assisted HMAC.
		 */
		if (kmb_get_total_data(rctx) &&
		    ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
			rctx->flags |= REQ_FLAGS_HMAC_HW;
		} else {
			rctx->flags |= REQ_FLAGS_HMAC_SW;
			rc = prepare_ipad(req);
			if (rc)
				return rc;
		}
	}

	return kmb_ocs_hcu_handle_queue(req);
}

static int kmb_ocs_hcu_final(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

	rctx->sg_data_total = 0;
	rctx->sg_data_offset = 0;
	rctx->sg = NULL;

	return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_finup(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

	rctx->sg_data_total = req->nbytes;
	rctx->sg_data_offset = 0;
	rctx->sg = req->src;

	return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_digest(struct ahash_request *req)
{
	int rc = 0;
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

	if (!hcu_dev)
		return -ENOENT;

	rc = kmb_ocs_hcu_init(req);
	if (rc)
		return rc;

	rc = kmb_ocs_hcu_finup(req);

	return rc;
}

static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

	/* Intermediate data is always stored and applied per request. */
	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

	/* Intermediate data is always stored and applied per request. */
	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	size_t blk_sz = crypto_ahash_blocksize(tfm);
	struct crypto_ahash *ahash_tfm;
	struct ahash_request *req;
	struct crypto_wait wait;
	struct scatterlist sg;
	const char *alg_name;
	int rc;

	/*
	 * The final key length must be equal to the block size:
	 * - if the key is shorter, we are done for now (it will be
	 *   zero-padded later on, by prepare_ipad()); this maximizes the use
	 *   of HW HMAC, which can only be used for keys not longer than
	 *   OCS_HCU_HW_KEY_LEN bytes;
	 * - if the key is longer, we hash it, as per the HMAC specification.
	 */
	if (keylen <= blk_sz) {
		memcpy(ctx->key, key, keylen);
		ctx->key_len = keylen;
		return 0;
	}

	switch (digestsize) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
	case SHA224_DIGEST_SIZE:
		alg_name = "sha224-keembay-ocs";
		break;
#endif
	case SHA256_DIGEST_SIZE:
		alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
					     "sha256-keembay-ocs";
		break;
	case SHA384_DIGEST_SIZE:
		alg_name = "sha384-keembay-ocs";
		break;
	case SHA512_DIGEST_SIZE:
		alg_name = "sha512-keembay-ocs";
		break;
	default:
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	sg_init_one(&sg, key, keylen);
	ahash_request_set_crypt(req, &sg, ctx->key, keylen);

	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
	if (rc == 0)
		ctx->key_len = digestsize;

	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);

	return rc;
}
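
/* Set the request size and initialize the transform context. */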
static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ocs_hcu_rctx));

	/* Initialize the context to zero. */
	memzero_explicit(ctx, sizeof(*ctx));

	/* Set the crypto-engine operation. */
	ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
}

static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	return 0;
}

static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_sm3_tfm = true;

	return 0;
}

static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_sm3_tfm = true;
	ctx->is_hmac_tfm = true;

	return 0;
}

static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_hmac_tfm = true;

	return 0;
}
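
/* Called when an HMAC tfm is destroyed; wipe the key from the context. */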
static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Clear the key. */
	memzero_explicit(ctx->key, sizeof(ctx->key));
}

static struct ahash_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "sha224-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.setkey		= kmb_ocs_hcu_setkey,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha224)",
			.cra_driver_name	= "hmac-sha224-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	}
},
#endif
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.setkey		= kmb_ocs_hcu_setkey,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha256)",
			.cra_driver_name	= "hmac-sha256-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.halg = {
		.digestsize	= SM3_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sm3",
			.cra_driver_name	= "sm3-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sm3_cra_init,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.setkey		= kmb_ocs_hcu_setkey,
	.halg = {
		.digestsize	= SM3_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sm3)",
			.cra_driver_name	= "hmac-sm3-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_sm3_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "sha384-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.setkey		= kmb_ocs_hcu_setkey,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha384)",
			.cra_driver_name	= "hmac-sha384-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	}
},
{
	.init		= kmb_ocs_hcu_init,
	.update		= kmb_ocs_hcu_update,
	.final		= kmb_ocs_hcu_final,
	.finup		= kmb_ocs_hcu_finup,
	.digest		= kmb_ocs_hcu_digest,
	.export		= kmb_ocs_hcu_export,
	.import		= kmb_ocs_hcu_import,
	.setkey		= kmb_ocs_hcu_setkey,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha512)",
			.cra_driver_name	= "hmac-sha512-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	}
},
};
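
/* Device tree driver match table. */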
static const struct of_device_id kmb_ocs_hcu_of_match[] = {
	{
		.compatible = "intel,keembay-ocs-hcu",
	},
	{}
};

static int kmb_ocs_hcu_remove(struct platform_device *pdev)
{
	struct ocs_hcu_dev *hcu_dev;
	int rc;

	hcu_dev = platform_get_drvdata(pdev);
	if (!hcu_dev)
		return -ENODEV;

	crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));

	rc = crypto_engine_exit(hcu_dev->engine);

	spin_lock_bh(&ocs_hcu.lock);
	list_del(&hcu_dev->list);
	spin_unlock_bh(&ocs_hcu.lock);

	return rc;
}

static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocs_hcu_dev *hcu_dev;
	struct resource *hcu_mem;
	int rc;

	hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
	if (!hcu_dev)
		return -ENOMEM;

	hcu_dev->dev = dev;

	platform_set_drvdata(pdev, hcu_dev);
	rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
	if (rc)
		return rc;

	/* Get the memory address and remap. */
	hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!hcu_mem) {
		dev_err(dev, "Could not retrieve io mem resource.\n");
		return -ENODEV;
	}

	hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
	if (IS_ERR(hcu_dev->io_base))
		return PTR_ERR(hcu_dev->io_base);

	init_completion(&hcu_dev->irq_done);

	/* Get and request IRQ. */
	hcu_dev->irq = platform_get_irq(pdev, 0);
	if (hcu_dev->irq < 0)
		return hcu_dev->irq;

	rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
				       ocs_hcu_irq_handler, NULL, 0,
				       "keembay-ocs-hcu", hcu_dev);
	if (rc < 0) {
		dev_err(dev, "Could not request IRQ.\n");
		return rc;
	}

	INIT_LIST_HEAD(&hcu_dev->list);

	spin_lock_bh(&ocs_hcu.lock);
	list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
	spin_unlock_bh(&ocs_hcu.lock);

	/* Initialize crypto engine. */
	hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hcu_dev->engine) {
		rc = -ENOMEM;
		goto list_del;
	}

	rc = crypto_engine_start(hcu_dev->engine);
	if (rc) {
		dev_err(dev, "Could not start engine.\n");
		goto cleanup;
	}

	/* Security infrastructure guarantees OCS clock is enabled. */

	rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
	if (rc) {
		dev_err(dev, "Could not register algorithms.\n");
		goto cleanup;
	}

	return 0;

cleanup:
	crypto_engine_exit(hcu_dev->engine);
list_del:
	spin_lock_bh(&ocs_hcu.lock);
	list_del(&hcu_dev->list);
	spin_unlock_bh(&ocs_hcu.lock);

	return rc;
}
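
/* The OCS HCU driver is a platform device driver. */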
static struct platform_driver kmb_ocs_hcu_driver = {
	.probe	= kmb_ocs_hcu_probe,
	.remove	= kmb_ocs_hcu_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= kmb_ocs_hcu_of_match,
	},
};

module_platform_driver(kmb_ocs_hcu_driver);

MODULE_LICENSE("GPL");