0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/delay.h>
0009 #include <linux/device.h>
0010 #include <linux/iopoll.h>
0011 #include <linux/irq.h>
0012 #include <linux/module.h>
0013
0014 #include <crypto/sha2.h>
0015
0016 #include "ocs-hcu.h"
0017
0018
0019 #define OCS_HCU_MODE 0x00
0020 #define OCS_HCU_CHAIN 0x04
0021 #define OCS_HCU_OPERATION 0x08
0022 #define OCS_HCU_KEY_0 0x0C
0023 #define OCS_HCU_ISR 0x50
0024 #define OCS_HCU_IER 0x54
0025 #define OCS_HCU_STATUS 0x58
0026 #define OCS_HCU_MSG_LEN_LO 0x60
0027 #define OCS_HCU_MSG_LEN_HI 0x64
0028 #define OCS_HCU_KEY_BYTE_ORDER_CFG 0x80
0029 #define OCS_HCU_DMA_SRC_ADDR 0x400
0030 #define OCS_HCU_DMA_SRC_SIZE 0x408
0031 #define OCS_HCU_DMA_DST_SIZE 0x40C
0032 #define OCS_HCU_DMA_DMA_MODE 0x410
0033 #define OCS_HCU_DMA_NEXT_SRC_DESCR 0x418
0034 #define OCS_HCU_DMA_MSI_ISR 0x480
0035 #define OCS_HCU_DMA_MSI_IER 0x484
0036 #define OCS_HCU_DMA_MSI_MASK 0x488
0037
0038
0039 #define HCU_MODE_ALGO_SHIFT 16
0040 #define HCU_MODE_HMAC_SHIFT 22
0041
0042 #define HCU_STATUS_BUSY BIT(0)
0043
0044 #define HCU_BYTE_ORDER_SWAP BIT(0)
0045
0046 #define HCU_IRQ_HASH_DONE BIT(2)
0047 #define HCU_IRQ_HASH_ERR_MASK (BIT(3) | BIT(1) | BIT(0))
0048
0049 #define HCU_DMA_IRQ_SRC_DONE BIT(0)
0050 #define HCU_DMA_IRQ_SAI_ERR BIT(2)
0051 #define HCU_DMA_IRQ_BAD_COMP_ERR BIT(3)
0052 #define HCU_DMA_IRQ_INBUF_RD_ERR BIT(4)
0053 #define HCU_DMA_IRQ_INBUF_WD_ERR BIT(5)
0054 #define HCU_DMA_IRQ_OUTBUF_WR_ERR BIT(6)
0055 #define HCU_DMA_IRQ_OUTBUF_RD_ERR BIT(7)
0056 #define HCU_DMA_IRQ_CRD_ERR BIT(8)
0057 #define HCU_DMA_IRQ_ERR_MASK (HCU_DMA_IRQ_SAI_ERR | \
0058 HCU_DMA_IRQ_BAD_COMP_ERR | \
0059 HCU_DMA_IRQ_INBUF_RD_ERR | \
0060 HCU_DMA_IRQ_INBUF_WD_ERR | \
0061 HCU_DMA_IRQ_OUTBUF_WR_ERR | \
0062 HCU_DMA_IRQ_OUTBUF_RD_ERR | \
0063 HCU_DMA_IRQ_CRD_ERR)
0064
0065 #define HCU_DMA_SNOOP_MASK (0x7 << 28)
0066 #define HCU_DMA_SRC_LL_EN BIT(25)
0067 #define HCU_DMA_EN BIT(31)
0068
0069 #define OCS_HCU_ENDIANNESS_VALUE 0x2A
0070
0071 #define HCU_DMA_MSI_UNMASK BIT(0)
0072 #define HCU_DMA_MSI_DISABLE 0
0073 #define HCU_IRQ_DISABLE 0
0074
0075 #define OCS_HCU_START BIT(0)
0076 #define OCS_HCU_TERMINATE BIT(1)
0077
0078 #define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
0079
0080 #define OCS_HCU_HW_KEY_LEN_U32 (OCS_HCU_HW_KEY_LEN / sizeof(u32))
0081
0082 #define HCU_DATA_WRITE_ENDIANNESS_OFFSET 26
0083
0084 #define OCS_HCU_NUM_CHAINS_SHA256_224_SM3 (SHA256_DIGEST_SIZE / sizeof(u32))
0085 #define OCS_HCU_NUM_CHAINS_SHA384_512 (SHA512_DIGEST_SIZE / sizeof(u32))
0086
0087
0088
0089
0090
0091 #define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US 200
0092
0093 #define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
0094
0095
0096
0097
0098
0099
0100
0101
/**
 * struct ocs_hcu_dma_entry - An entry in the OCS DMA linked list.
 * @src_addr: Source address of the data to process.
 * @src_len:  Length of the data to process.
 * @nxt_desc: DMA address of the next descriptor to fetch.
 * @ll_flags: Flags (e.g. OCS_LL_DMA_FLAG_TERMINATE) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
	u32 src_addr;
	u32 src_len;
	u32 nxt_desc;
	u32 ll_flags;
};
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:      The head of the list (points to the array backing the list).
 * @tail:      The current tail of the list; NULL when the list is empty.
 * @dma_addr:  The DMA address of @head (i.e., the DMA address of the backing
 *             array).
 * @max_nents: Maximum number of entries the list can hold (i.e., the number
 *             of elements in the backing array).
 *
 * The list is backed by a single coherent-DMA array of descriptors; entries
 * are appended in order, and the current tail always carries the TERMINATE
 * flag.
 */
struct ocs_hcu_dma_list {
	struct ocs_hcu_dma_entry *head;
	struct ocs_hcu_dma_entry *tail;
	dma_addr_t dma_addr;
	size_t max_nents;
};
0128
0129 static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
0130 {
0131 switch (algo) {
0132 case OCS_HCU_ALGO_SHA224:
0133 case OCS_HCU_ALGO_SHA256:
0134 case OCS_HCU_ALGO_SM3:
0135 return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
0136 case OCS_HCU_ALGO_SHA384:
0137 case OCS_HCU_ALGO_SHA512:
0138 return OCS_HCU_NUM_CHAINS_SHA384_512;
0139 default:
0140 return 0;
0141 };
0142 }
0143
0144 static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
0145 {
0146 switch (algo) {
0147 case OCS_HCU_ALGO_SHA224:
0148 return SHA224_DIGEST_SIZE;
0149 case OCS_HCU_ALGO_SHA256:
0150 case OCS_HCU_ALGO_SM3:
0151
0152 return SHA256_DIGEST_SIZE;
0153 case OCS_HCU_ALGO_SHA384:
0154 return SHA384_DIGEST_SIZE;
0155 case OCS_HCU_ALGO_SHA512:
0156 return SHA512_DIGEST_SIZE;
0157 default:
0158 return 0;
0159 }
0160 }
0161
0162
0163
0164
0165
0166
0167
0168
/**
 * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become idle.
 * @hcu_dev: The OCS HCU device to wait for.
 *
 * Polls the STATUS register until the BUSY bit clears, sleeping between
 * reads, up to the internal timeout.
 *
 * Return: 0 when the device is free, -ETIMEDOUT if it is still busy when the
 *	   timeout expires.
 */
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
	long val;

	return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
				  !(val & HCU_STATUS_BUSY),
				  OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
				  OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}
0178
/* Enable the HCU "hash done" and error interrupts. */
static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts and the sticky error flag. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
	hcu_dev->irq_err = false;
	/* Enable the "hash done" and error interrupts. */
	writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
	       hcu_dev->io_base + OCS_HCU_IER);
}
0188
/* Enable the OCS HCU DMA "source done" and error interrupts. */
static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending DMA interrupts and the sticky error flag. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	hcu_dev->irq_err = false;
	/* Enable the DMA "source done" and error interrupts. */
	writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
	       hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
	/* Unmask the DMA MSI interrupt. */
	writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}
0200
/* Disable both the HCU and the HCU-DMA interrupts. */
static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
	writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
	writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}
0206
0207 static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
0208 {
0209 int rc;
0210
0211 rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
0212 if (rc)
0213 goto exit;
0214
0215 if (hcu_dev->irq_err) {
0216
0217 hcu_dev->irq_err = false;
0218 rc = -EIO;
0219 goto exit;
0220 }
0221
0222 exit:
0223 ocs_hcu_irq_dis(hcu_dev);
0224
0225 return rc;
0226 }
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
/**
 * ocs_hcu_get_intermediate_data() - Get intermediate hashing state.
 * @hcu_dev: The target HCU device.
 * @data:    Where to store the intermediate data.
 * @algo:    The algorithm being used.
 *
 * Saves the current hashing state (chain registers and message length) so
 * the operation can be resumed later. Note: once all data has been
 * processed, the chain registers hold the final digest, so this is also how
 * the end result is read back.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					 struct ocs_hcu_idata *data,
					 enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain;
	int rc;
	int i;

	/* Data cannot be NULL. */
	if (!data)
		return -EINVAL;

	chain = (u32 *)data->digest;

	/* Ensure the hardware is idle before reading its registers. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/*
	 * Repeated reads of the single CHAIN register yield the successive
	 * 32-bit words of the intermediate digest state.
	 */
	for (i = 0; i < n; i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	/* Also save the current message length. */
	data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);

	return 0;
}
0277
0278
0279
0280
0281
0282
0283
0284
0285
/**
 * ocs_hcu_set_intermediate_data() - Restore intermediate hashing state.
 * @hcu_dev: The target HCU device.
 * @data:    The intermediate data to restore (previously saved with
 *           ocs_hcu_get_intermediate_data()).
 * @algo:    The algorithm being used.
 *
 * Writes the saved chain registers and message length back into the
 * hardware, so that a previously suspended hashing operation can continue.
 */
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					  const struct ocs_hcu_idata *data,
					  enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain = (u32 *)data->digest;
	int i;

	/*
	 * Repeated writes to the single CHAIN register load the successive
	 * 32-bit words of the intermediate digest state.
	 */
	for (i = 0; i < n; i++)
		writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);

	/* Restore the saved message length. */
	writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}
0306
0307 static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
0308 enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
0309 {
0310 u32 *chain;
0311 int rc;
0312 int i;
0313
0314 if (!dgst)
0315 return -EINVAL;
0316
0317
0318 if (dgst_len != ocs_hcu_digest_size(algo))
0319 return -EINVAL;
0320
0321
0322 rc = ocs_hcu_wait_busy(hcu_dev);
0323 if (rc)
0324 return rc;
0325
0326 chain = (u32 *)dgst;
0327 for (i = 0; i < dgst_len / sizeof(u32); i++)
0328 chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
0329
0330 return 0;
0331 }
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341 static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
0342 bool use_hmac)
0343 {
0344 u32 cfg;
0345 int rc;
0346
0347 if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
0348 algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
0349 algo != OCS_HCU_ALGO_SM3)
0350 return -EINVAL;
0351
0352 rc = ocs_hcu_wait_busy(hcu_dev);
0353 if (rc)
0354 return rc;
0355
0356
0357 ocs_hcu_irq_dis(hcu_dev);
0358
0359
0360 cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
0361 cfg |= algo << HCU_MODE_ALGO_SHIFT;
0362 if (use_hmac)
0363 cfg |= BIT(HCU_MODE_HMAC_SHIFT);
0364
0365 writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
0366
0367 return 0;
0368 }
0369
0370
0371
0372
0373
0374 static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
0375 {
0376 int reg_off;
0377
0378
0379 for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
0380 writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
0381 }
0382
0383
0384
0385
0386
0387
0388
0389
0390
/**
 * ocs_hcu_write_key() - Write the HMAC key into the hardware key vector.
 * @hcu_dev: The OCS HCU device.
 * @key:     The key to write.
 * @len:     The length of @key; must not exceed OCS_HCU_HW_KEY_LEN.
 *
 * Return: 0 on success, -EINVAL if the key is too long.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
	u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
	int i;

	if (len > OCS_HCU_HW_KEY_LEN)
		return -EINVAL;

	/* Copy the key into a local, word-aligned buffer. */
	memcpy(key_u32, key, len);

	/*
	 * The whole key vector must be written, so zero-pad the remainder up
	 * to OCS_HCU_HW_KEY_LEN (using memzero_explicit() since this is key
	 * material).
	 */
	memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

	/*
	 * Enable byte swapping for the key vector, so that the bytes of each
	 * 32-bit word written to OCS_HCU_KEY_[0..15] are swapped
	 * (3 <-> 0, 2 <-> 1). Together with the reversed word order below,
	 * this writes the key in the byte order the hardware expects.
	 * NOTE(review): exact HW byte-order semantics inferred from this
	 * write pattern — confirm against the OCS HCU spec.
	 */
	writel(HCU_BYTE_ORDER_SWAP,
	       hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);

	/*
	 * Write the 32-bit words composing the key starting from the end of
	 * the key (i.e., in reverse word order).
	 */
	for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
		writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
		       hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

	/* Scrub the local copy of the key. */
	memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

	return 0;
}
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
/**
 * ocs_hcu_ll_dma_start() - Start hashing via DMA linked list.
 * @hcu_dev:  The OCS HCU device to use.
 * @dma_list: The OCS DMA list mapping the data to hash.
 * @finalize: Whether this is the last chunk, i.e., whether the final digest
 *	      should be computed (TERMINATE issued).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
				const struct ocs_hcu_dma_list *dma_list,
				bool finalize)
{
	u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
	int rc;

	if (!dma_list)
		return -EINVAL;

	/*
	 * For final requests we use the HCU_DONE IRQ to be notified when all
	 * input data has been processed by the HCU; for non-final requests no
	 * HCU_DONE IRQ fires (the operation is not terminated), so we use the
	 * DMA "source done" IRQ instead, which triggers once the DMA has
	 * finished feeding all the input data to the HCU engine.
	 */
	if (finalize)
		ocs_hcu_done_irq_en(hcu_dev);
	else
		ocs_hcu_dma_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);
	/* Point the DMA engine at the first linked-list descriptor. */
	writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
	/*
	 * SRC/DST sizes are zeroed here; presumably sizes come from the
	 * linked-list descriptors in LL mode — confirm against the HW spec.
	 */
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

	/* Start the hashing engine, then enable the DMA (in this order). */
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);

	writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	/* For final requests, also request computation of the final digest. */
	if (finalize)
		writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	return 0;
}
0488
0489 struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
0490 int max_nents)
0491 {
0492 struct ocs_hcu_dma_list *dma_list;
0493
0494 dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
0495 if (!dma_list)
0496 return NULL;
0497
0498
0499 dma_list->head = dma_alloc_coherent(hcu_dev->dev,
0500 sizeof(*dma_list->head) * max_nents,
0501 &dma_list->dma_addr, GFP_KERNEL);
0502 if (!dma_list->head) {
0503 kfree(dma_list);
0504 return NULL;
0505 }
0506 dma_list->max_nents = max_nents;
0507 dma_list->tail = NULL;
0508
0509 return dma_list;
0510 }
0511
/*
 * Free an OCS DMA linked list previously allocated with
 * ocs_hcu_dma_list_alloc(). Passing NULL is a no-op.
 */
void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
			   struct ocs_hcu_dma_list *dma_list)
{
	if (!dma_list)
		return;

	/* Release the coherent descriptor array, then the list header. */
	dma_free_coherent(hcu_dev->dev,
			  sizeof(*dma_list->head) * dma_list->max_nents,
			  dma_list->head, dma_list->dma_addr);

	kfree(dma_list);
}
0524
0525
/*
 * Add a new DMA entry (mapping @len bytes at DMA address @addr) at the end
 * of the OCS DMA list. Zero-length additions succeed without modifying the
 * list.
 *
 * Return: 0 on success, -EINVAL for a NULL list or an address outside the
 *	   OCS DMA range, -ENOMEM when the list is full.
 */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
			      struct ocs_hcu_dma_list *dma_list,
			      dma_addr_t addr, u32 len)
{
	struct device *dev = hcu_dev->dev;
	struct ocs_hcu_dma_entry *old_tail;
	struct ocs_hcu_dma_entry *new_tail;

	if (!len)
		return 0;

	if (!dma_list)
		return -EINVAL;

	/* The OCS DMA engine can only address a limited range. */
	if (addr & ~OCS_HCU_DMA_BIT_MASK) {
		dev_err(dev,
			"Unexpected error: Invalid DMA address for OCS HCU\n");
		return -EINVAL;
	}

	old_tail = dma_list->tail;
	/* An empty list starts at the head of the backing array. */
	new_tail = old_tail ? old_tail + 1 : dma_list->head;

	/* Check if list is full. */
	if (new_tail - dma_list->head >= dma_list->max_nents)
		return -ENOMEM;

	/*
	 * If there was an old tail (i.e., this is not the first element being
	 * added), un-terminate it and link it to the new tail.
	 */
	if (old_tail) {
		old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
		/*
		 * The old tail's 'nxt_desc' must hold the DMA address of the
		 * new tail, computed from its offset within the backing
		 * array.
		 */
		old_tail->nxt_desc = dma_list->dma_addr +
				     sizeof(*dma_list->tail) * (new_tail -
								dma_list->head);
	}

	/* The new tail terminates the list and has no successor. */
	new_tail->src_addr = (u32)addr;
	new_tail->src_len = (u32)len;
	new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
	new_tail->nxt_desc = 0;

	/* Update list tail with new tail. */
	dma_list->tail = new_tail;

	return 0;
}
0578
0579
0580
0581
0582
0583
0584
0585
0586 int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
0587 {
0588 if (!ctx)
0589 return -EINVAL;
0590
0591 ctx->algo = algo;
0592 ctx->idata.msg_len_lo = 0;
0593 ctx->idata.msg_len_hi = 0;
0594
0595
0596 return 0;
0597 }
0598
0599
0600
0601
0602
0603
0604
0605
0606
/**
 * ocs_hcu_hash_update() - Perform a hashing iteration.
 * @hcu_dev:  The OCS HCU device to use.
 * @ctx:      The OCS HCU hashing context.
 * @dma_list: The OCS DMA list mapping the input data to process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
			struct ocs_hcu_hash_ctx *ctx,
			const struct ocs_hcu_dma_list *dma_list)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, restore the saved state. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Process the input data (non-final: do not terminate). */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
	if (rc)
		return rc;

	/* Save the updated state so hashing can be continued later. */
	return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
/**
 * ocs_hcu_hash_finup() - Update and finalize a hash computation.
 * @hcu_dev:  The OCS HCU device to use.
 * @ctx:      The OCS HCU hashing context.
 * @dma_list: The OCS DMA list mapping the last input data to process.
 * @dgst:     The buffer where to save the computed digest.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx,
		       const struct ocs_hcu_dma_list *dma_list,
		       u8 *dgst, size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, restore the saved state. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Process the remaining data and terminate the operation. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	/* Read back the final digest. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
/**
 * ocs_hcu_hash_final() - Finalize a hash computation (no new data).
 * @hcu_dev:  The OCS HCU device to use.
 * @ctx:      The OCS HCU hashing context.
 * @dgst:     The buffer where to save the computed digest.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
		       size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, restore the saved state. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Enable the "hash done" interrupt and terminate the operation; the
	 * HCU_DONE IRQ will fire once the final digest has been computed.
	 */
	ocs_hcu_done_irq_en(hcu_dev);
	reinit_completion(&hcu_dev->irq_done);
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	/* Read back the final digest. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726 int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
0727 void *data, size_t data_len, u8 *dgst, size_t dgst_len)
0728 {
0729 struct device *dev = hcu_dev->dev;
0730 dma_addr_t dma_handle;
0731 u32 reg;
0732 int rc;
0733
0734
0735 rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
0736 if (rc)
0737 return rc;
0738
0739 dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
0740 if (dma_mapping_error(dev, dma_handle))
0741 return -EIO;
0742
0743 reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
0744
0745 ocs_hcu_done_irq_en(hcu_dev);
0746
0747 reinit_completion(&hcu_dev->irq_done);
0748
0749 writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
0750 writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
0751 writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
0752 writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
0753
0754 writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
0755
0756 rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
0757 if (rc)
0758 return rc;
0759
0760 dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
0761
0762 return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
0763 }
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
/**
 * ocs_hcu_hmac() - Compute the HMAC of the data mapped by @dma_list.
 * @hcu_dev:  The OCS HCU device to use.
 * @algo:     The hash algorithm to use with HMAC.
 * @key:      The key to use; must be non-NULL and non-empty.
 * @key_len:  The length of @key.
 * @dma_list: The OCS DMA list mapping the input data to process.
 * @dgst:     The buffer where to save the computed HMAC.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		 const u8 *key, size_t key_len,
		 const struct ocs_hcu_dma_list *dma_list,
		 u8 *dgst, size_t dgst_len)
{
	int rc;

	/* Ensure 'key' is not NULL and key length is not zero. */
	if (!key || key_len == 0)
		return -EINVAL;

	/* Configure the hardware in HMAC mode. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
	if (rc)
		return rc;

	rc = ocs_hcu_write_key(hcu_dev, key, key_len);
	if (rc)
		return rc;

	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);

	/* Clear the HW key before processing the return code, so the key is
	 * scrubbed even when the DMA operation failed.
	 */
	ocs_hcu_clear_key(hcu_dev);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
0807
/*
 * OCS HCU interrupt handler: acknowledges both the HCU and HCU-DMA interrupt
 * sources, records errors in hcu_dev->irq_err, and completes irq_done when
 * either the hash or the DMA transfer is finished.
 */
irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
	struct ocs_hcu_dev *hcu_dev = dev_id;
	u32 hcu_irq;
	u32 dma_irq;

	/* Read and clear the HCU interrupt status (write-1-to-clear). */
	hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
	writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);

	/* Read and clear the HCU-DMA interrupt status (write-1-to-clear). */
	dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);

	/* Flag any hash or DMA error for the waiter, then complete. */
	if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
		hcu_dev->irq_err = true;
		goto complete;
	}

	/* Complete on hash done or DMA source done. */
	if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
		goto complete;

	/* No interrupt bit of interest was set: not ours. */
	return IRQ_NONE;

complete:
	complete(&hcu_dev->irq_done);

	return IRQ_HANDLED;
}
0839
0840 MODULE_LICENSE("GPL");