0001
0002
0003
0004
0005
0006 #include <linux/err.h>
0007 #include <linux/module.h>
0008 #include <linux/init.h>
0009 #include <linux/errno.h>
0010 #include <linux/kernel.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/platform_device.h>
0013 #include <linux/scatterlist.h>
0014 #include <linux/crypto.h>
0015 #include <linux/kthread.h>
0016 #include <linux/rtnetlink.h>
0017 #include <linux/sched.h>
0018 #include <linux/of_address.h>
0019 #include <linux/of_device.h>
0020 #include <linux/io.h>
0021 #include <linux/bitops.h>
0022
0023 #include <crypto/algapi.h>
0024 #include <crypto/aead.h>
0025 #include <crypto/internal/aead.h>
0026 #include <crypto/aes.h>
0027 #include <crypto/internal/des.h>
0028 #include <crypto/hmac.h>
0029 #include <crypto/md5.h>
0030 #include <crypto/authenc.h>
0031 #include <crypto/skcipher.h>
0032 #include <crypto/hash.h>
0033 #include <crypto/sha1.h>
0034 #include <crypto/sha2.h>
0035 #include <crypto/sha3.h>
0036
0037 #include "util.h"
0038 #include "cipher.h"
0039 #include "spu.h"
0040 #include "spum.h"
0041 #include "spu2.h"
0042
0043
0044
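/* Global driver state, shared across all SPU hardware instances */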
0045 struct bcm_device_private iproc_priv;
0046
0047
0048
0049 int flow_debug_logging;
0050 module_param(flow_debug_logging, int, 0644);
0051 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
0052
0053 int packet_debug_logging;
0054 module_param(packet_debug_logging, int, 0644);
0055 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
0056
0057 int debug_logging_sleep;
0058 module_param(debug_logging_sleep, int, 0644);
0059 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
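/*
 * Priorities used when registering the skcipher, hash, and AEAD algorithms
 * with the kernel crypto API. A higher priority makes the crypto API prefer
 * this driver's implementation over lower-priority ones.
 */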
0070 static int cipher_pri = 150;
0071 module_param(cipher_pri, int, 0644);
0072 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
0073
0074 static int hash_pri = 100;
0075 module_param(hash_pri, int, 0644);
0076 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
0077
0078 static int aead_pri = 150;
0079 module_param(aead_pri, int, 0644);
0080 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
0081
0082
0083
0084
0085
0086
0087
0088
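/*
 * Fixed header prepended to SPU request messages. Only the first
 * BCM_HDR_LEN bytes are actually copied, so this is a no-op when the
 * detected hardware reports a zero-length BCM header.
 */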
0089 static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
0090
0091
0092
0093
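/* Length of the BCM header required by the SPU hardware in use */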
0094 #define BCM_HDR_LEN iproc_priv.bcm_hdr_len
0095
0096
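/* Range, in microseconds, to sleep between mailbox send retries */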
0097 #define MBOX_SLEEP_MIN 800
0098 #define MBOX_SLEEP_MAX 1000
0099
0100
0101
0102
0103
0104
0105
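/*
 * select_channel() - Pick the mailbox channel for the next request, in
 * round-robin order across the available SPU channels.
 *
 * Return: channel index
 */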
0106 static u8 select_channel(void)
0107 {
0108 u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
0109
0110 return chan_idx % iproc_priv.spu.num_chan;
0111 }
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
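/*
 * spu_skcipher_rx_sg_create() - Build the receive scatterlist for a mailbox
 * message carrying an skcipher SPU response: response header, optional XTS
 * tweak, destination data, optional status padding, and status word.
 *
 * Return: 0 on success, -ENOMEM if the scatterlist cannot be allocated,
 * -EFAULT if the destination sg cannot supply the expected amount of data
 */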
0133 static int
0134 spu_skcipher_rx_sg_create(struct brcm_message *mssg,
0135 struct iproc_reqctx_s *rctx,
0136 u8 rx_frag_num,
0137 unsigned int chunksize, u32 stat_pad_len)
0138 {
0139 struct spu_hw *spu = &iproc_priv.spu;
0140 struct scatterlist *sg;
0141 struct iproc_ctx_s *ctx = rctx->ctx;
0142 u32 datalen;
0143
0144 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
0145 rctx->gfp);
0146 if (!mssg->spu.dst)
0147 return -ENOMEM;
0148
0149 sg = mssg->spu.dst;
0150 sg_init_table(sg, rx_frag_num);
0151
0152 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
0153
0154
0155 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
0156 spu->spu_xts_tweak_in_payload())
0157 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
0158 SPU_XTS_TWEAK_SIZE);
0159
0160
0161 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
0162 rctx->dst_nents, chunksize);
0163 if (datalen < chunksize) {
0164 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
0165 __func__, chunksize, datalen);
0166 return -EFAULT;
0167 }
0168
0169 if (stat_pad_len)
0170 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
0171
0172 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
0173 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
0174
0175 return 0;
0176 }
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
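/*
 * spu_skcipher_tx_sg_create() - Build the transmit scatterlist for a mailbox
 * message carrying an skcipher SPU request: BCM/SPU request header, optional
 * XTS tweak, source data, optional request padding, and (if used) the
 * transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */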
0197 static int
0198 spu_skcipher_tx_sg_create(struct brcm_message *mssg,
0199 struct iproc_reqctx_s *rctx,
0200 u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
0201 {
0202 struct spu_hw *spu = &iproc_priv.spu;
0203 struct scatterlist *sg;
0204 struct iproc_ctx_s *ctx = rctx->ctx;
0205 u32 datalen;
0206 u32 stat_len;
0207
0208 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
0209 rctx->gfp);
0210 if (unlikely(!mssg->spu.src))
0211 return -ENOMEM;
0212
0213 sg = mssg->spu.src;
0214 sg_init_table(sg, tx_frag_num);
0215
0216 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
0217 BCM_HDR_LEN + ctx->spu_req_hdr_len);
0218
0219
0220 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
0221 spu->spu_xts_tweak_in_payload())
0222 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
0223
0224
0225 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
0226 rctx->src_nents, chunksize);
0227 if (unlikely(datalen < chunksize)) {
0228 pr_err("%s(): failed to copy src sg to mbox msg",
0229 __func__);
0230 return -EFAULT;
0231 }
0232
0233 if (pad_len)
0234 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
0235
0236 stat_len = spu->spu_tx_status_len();
0237 if (stat_len) {
0238 memset(rctx->msg_buf.tx_stat, 0, stat_len);
0239 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
0240 }
0241 return 0;
0242 }
0243
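/*
 * mailbox_send_message() - Hand a message to the given mailbox channel. If
 * the mailbox queue is full and the request may sleep, retry up to
 * SPU_MB_RETRY_MAX times with a short delay between attempts. Failures and
 * retries are counted in the driver statistics.
 *
 * Return: 0 on success, negative errno otherwise
 */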
0244 static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
0245 u8 chan_idx)
0246 {
0247 int err;
0248 int retry_cnt = 0;
0249 struct device *dev = &(iproc_priv.pdev->dev);
0250
0251 err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
0252 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
0253 while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
0254
0255
0256
0257
0258 retry_cnt++;
0259 usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
0260 err = mbox_send_message(iproc_priv.mbox[chan_idx],
0261 mssg);
0262 atomic_inc(&iproc_priv.mb_no_spc);
0263 }
0264 }
0265 if (err < 0) {
0266 atomic_inc(&iproc_priv.mb_send_fail);
0267 return err;
0268 }
0269
0270
0271 err = mssg->error;
0272 if (unlikely(err < 0)) {
0273 dev_err(dev, "message error %d", err);
0274
0275 }
0276
0277
0278 mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
0279 return err;
0280 }
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
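/*
 * handle_skcipher_req() - Submit the next chunk of an skcipher request to
 * the SPU. Carves off at most max_payload bytes, updates the IV/counter for
 * chained modes, finishes the SPU request header, builds the rx and tx
 * scatterlists, and sends the message on the selected mailbox channel.
 *
 * Return: -EINPROGRESS if the chunk was sent successfully,
 *         negative errno otherwise
 */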
0300 static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
0301 {
0302 struct spu_hw *spu = &iproc_priv.spu;
0303 struct crypto_async_request *areq = rctx->parent;
0304 struct skcipher_request *req =
0305 container_of(areq, struct skcipher_request, base);
0306 struct iproc_ctx_s *ctx = rctx->ctx;
0307 struct spu_cipher_parms cipher_parms;
0308 int err;
0309 unsigned int chunksize;
0310 int remaining;
0311 int chunk_start;
0312
0313
0314 u8 local_iv_ctr[MAX_IV_SIZE];
0315 u32 stat_pad_len;
0316 u32 pad_len;
0317 struct brcm_message *mssg;
0318
0319
0320 u8 rx_frag_num = 2;
0321 u8 tx_frag_num = 1;
0322
0323 flow_log("%s\n", __func__);
0324
0325 cipher_parms.alg = ctx->cipher.alg;
0326 cipher_parms.mode = ctx->cipher.mode;
0327 cipher_parms.type = ctx->cipher_type;
0328 cipher_parms.key_len = ctx->enckeylen;
0329 cipher_parms.key_buf = ctx->enckey;
0330 cipher_parms.iv_buf = local_iv_ctr;
0331 cipher_parms.iv_len = rctx->iv_ctr_len;
0332
0333 mssg = &rctx->mb_mssg;
0334 chunk_start = rctx->src_sent;
0335 remaining = rctx->total_todo - chunk_start;
0336
0337
0338 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
0339 (remaining > ctx->max_payload))
0340 chunksize = ctx->max_payload;
0341 else
0342 chunksize = remaining;
0343
0344 rctx->src_sent += chunksize;
0345 rctx->total_sent = rctx->src_sent;
0346
0347
0348 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
0349 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
0350
0351 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
0352 rctx->is_encrypt && chunk_start)
0353
0354
0355
0356
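/*
 * CBC encrypt, not the first chunk: the IV for this chunk is the last
 * ciphertext block produced by the previous chunk, so fetch it from the
 * destination sg just before this chunk's starting offset.
 */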
0357 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
0358 rctx->iv_ctr_len,
0359 chunk_start - rctx->iv_ctr_len);
0360
0361 if (rctx->iv_ctr_len) {
0362
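/* Keep a local copy of the IV/counter for this chunk */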
0363 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
0364 rctx->iv_ctr_len);
0365
0366
0367 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
0368 !rctx->is_encrypt) {
0369
0370
0371
0372
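/*
 * CBC decrypt: the IV for the next chunk is the last ciphertext block of
 * this chunk, taken from the source sg.
 */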
0373 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
0374 rctx->iv_ctr_len,
0375 rctx->src_sent - rctx->iv_ctr_len);
0376 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
0377
0378
0379
0380
0381
0382
0383
0384
0385
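/*
 * CTR mode: advance the counter by the number of 16-byte blocks consumed
 * by this chunk so the next chunk continues from the correct counter value.
 */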
0386 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
0387 }
0388 }
0389
0390 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
0391 flow_log("max_payload infinite\n");
0392 else
0393 flow_log("max_payload %u\n", ctx->max_payload);
0394
0395 flow_log("sent:%u start:%u remains:%u size:%u\n",
0396 rctx->src_sent, chunk_start, remaining, chunksize);
0397
0398
0399 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
0400 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
0401
0402 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
0403 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
0404 &cipher_parms, chunksize);
0405
0406 atomic64_add(chunksize, &iproc_priv.bytes_out);
0407
0408 stat_pad_len = spu->spu_wordalign_padlen(chunksize);
0409 if (stat_pad_len)
0410 rx_frag_num++;
0411 pad_len = stat_pad_len;
0412 if (pad_len) {
0413 tx_frag_num++;
0414 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
0415 0, ctx->auth.alg, ctx->auth.mode,
0416 rctx->total_sent, stat_pad_len);
0417 }
0418
0419 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
0420 ctx->spu_req_hdr_len);
0421 packet_log("payload:\n");
0422 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
0423 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
0424
0425
0426
0427
0428
0429 memset(mssg, 0, sizeof(*mssg));
0430 mssg->type = BRCM_MESSAGE_SPU;
0431 mssg->ctx = rctx;
0432
0433
0434 rx_frag_num += rctx->dst_nents;
0435
0436 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
0437 spu->spu_xts_tweak_in_payload())
0438 rx_frag_num++;
0439
0440 err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
0441 stat_pad_len);
0442 if (err)
0443 return err;
0444
0445
0446 tx_frag_num += rctx->src_nents;
0447 if (spu->spu_tx_status_len())
0448 tx_frag_num++;
0449
0450 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
0451 spu->spu_xts_tweak_in_payload())
0452 tx_frag_num++;
0453
0454 err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
0455 pad_len);
0456 if (err)
0457 return err;
0458
0459 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
0460 if (unlikely(err < 0))
0461 return err;
0462
0463 return -EINPROGRESS;
0464 }
0465
0466
0467
0468
0469
0470
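/*
 * handle_skcipher_resp() - Process an skcipher SPU response: account for the
 * returned payload (minus any XTS tweak carried in the payload) and bump the
 * operation counters once the full request has been received.
 */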
0471 static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
0472 {
0473 struct spu_hw *spu = &iproc_priv.spu;
0474 struct crypto_async_request *areq = rctx->parent;
0475 struct skcipher_request *req = skcipher_request_cast(areq);
0476 struct iproc_ctx_s *ctx = rctx->ctx;
0477 u32 payload_len;
0478
0479
0480 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
0481
0482
0483
0484
0485
0486 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
0487 spu->spu_xts_tweak_in_payload() &&
0488 (payload_len >= SPU_XTS_TWEAK_SIZE))
0489 payload_len -= SPU_XTS_TWEAK_SIZE;
0490
0491 atomic64_add(payload_len, &iproc_priv.bytes_in);
0492
0493 flow_log("%s() offset: %u, bd_len: %u BD:\n",
0494 __func__, rctx->total_received, payload_len);
0495
0496 dump_sg(req->dst, rctx->total_received, payload_len);
0497
0498 rctx->total_received += payload_len;
0499 if (rctx->total_received == rctx->total_todo) {
0500 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
0501 atomic_inc(
0502 &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
0503 }
0504 }
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
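/*
 * spu_ahash_rx_sg_create() - Build the receive scatterlist for an ahash SPU
 * response: response header, digest, optional status padding, and status
 * word.
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */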
0525 static int
0526 spu_ahash_rx_sg_create(struct brcm_message *mssg,
0527 struct iproc_reqctx_s *rctx,
0528 u8 rx_frag_num, unsigned int digestsize,
0529 u32 stat_pad_len)
0530 {
0531 struct spu_hw *spu = &iproc_priv.spu;
0532 struct scatterlist *sg;
0533 struct iproc_ctx_s *ctx = rctx->ctx;
0534
0535 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
0536 rctx->gfp);
0537 if (!mssg->spu.dst)
0538 return -ENOMEM;
0539
0540 sg = mssg->spu.dst;
0541 sg_init_table(sg, rx_frag_num);
0542
0543 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
0544
0545
0546 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
0547
0548 if (stat_pad_len)
0549 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
0550
0551 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
0552 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
0553 return 0;
0554 }
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
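/*
 * spu_ahash_tx_sg_create() - Build the transmit scatterlist for an ahash SPU
 * request: BCM/SPU request header, any data carried over from a previous
 * update (hash carry), new data from the source sg, optional padding, and
 * (if used) the transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */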
0577 static int
0578 spu_ahash_tx_sg_create(struct brcm_message *mssg,
0579 struct iproc_reqctx_s *rctx,
0580 u8 tx_frag_num,
0581 u32 spu_hdr_len,
0582 unsigned int hash_carry_len,
0583 unsigned int new_data_len, u32 pad_len)
0584 {
0585 struct spu_hw *spu = &iproc_priv.spu;
0586 struct scatterlist *sg;
0587 u32 datalen;
0588 u32 stat_len;
0589
0590 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
0591 rctx->gfp);
0592 if (!mssg->spu.src)
0593 return -ENOMEM;
0594
0595 sg = mssg->spu.src;
0596 sg_init_table(sg, tx_frag_num);
0597
0598 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
0599 BCM_HDR_LEN + spu_hdr_len);
0600
0601 if (hash_carry_len)
0602 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
0603
0604 if (new_data_len) {
0605
0606 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
0607 rctx->src_nents, new_data_len);
0608 if (datalen < new_data_len) {
0609 pr_err("%s(): failed to copy src sg to mbox msg",
0610 __func__);
0611 return -EFAULT;
0612 }
0613 }
0614
0615 if (pad_len)
0616 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
0617
0618 stat_len = spu->spu_tx_status_len();
0619 if (stat_len) {
0620 memset(rctx->msg_buf.tx_stat, 0, stat_len);
0621 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
0622 }
0623
0624 return 0;
0625 }
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
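/*
 * handle_ahash_req() - Submit the next chunk of an ahash request to the SPU.
 * For non-final updates, any partial block is held back in the hash-carry
 * buffer; if nothing remains to send to the hardware, -EAGAIN is returned so
 * the caller can complete the request without a hardware operation.
 *
 * Return: -EINPROGRESS if a chunk was sent to the SPU,
 *         -EAGAIN if the data was absorbed into the hash-carry buffer,
 *         negative errno otherwise
 */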
0653 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
0654 {
0655 struct spu_hw *spu = &iproc_priv.spu;
0656 struct crypto_async_request *areq = rctx->parent;
0657 struct ahash_request *req = ahash_request_cast(areq);
0658 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
0659 struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
0660 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
0661 struct iproc_ctx_s *ctx = rctx->ctx;
0662
0663
0664 unsigned int nbytes_to_hash = 0;
0665 int err;
0666 unsigned int chunksize = 0;
0667
0668
0669
0670
0671 unsigned int new_data_len;
0672
0673 unsigned int __maybe_unused chunk_start = 0;
0674 u32 db_size;
0675 int pad_len = 0;
0676 u32 data_pad_len = 0;
0677 u32 stat_pad_len = 0;
0678 struct brcm_message *mssg;
0679 struct spu_request_opts req_opts;
0680 struct spu_cipher_parms cipher_parms;
0681 struct spu_hash_parms hash_parms;
0682 struct spu_aead_parms aead_parms;
0683 unsigned int local_nbuf;
0684 u32 spu_hdr_len;
0685 unsigned int digestsize;
0686 u16 rem = 0;
0687
0688
0689
0690
0691
0692 u8 rx_frag_num = 3;
0693 u8 tx_frag_num = 1;
0694
0695 flow_log("total_todo %u, total_sent %u\n",
0696 rctx->total_todo, rctx->total_sent);
0697
0698 memset(&req_opts, 0, sizeof(req_opts));
0699 memset(&cipher_parms, 0, sizeof(cipher_parms));
0700 memset(&hash_parms, 0, sizeof(hash_parms));
0701 memset(&aead_parms, 0, sizeof(aead_parms));
0702
0703 req_opts.bd_suppress = true;
0704 hash_parms.alg = ctx->auth.alg;
0705 hash_parms.mode = ctx->auth.mode;
0706 hash_parms.type = HASH_TYPE_NONE;
0707 hash_parms.key_buf = (u8 *)ctx->authkey;
0708 hash_parms.key_len = ctx->authkeylen;
0709
0710
0711
0712
0713
0714
0715
0716
0717 cipher_parms.type = ctx->cipher_type;
0718
0719 mssg = &rctx->mb_mssg;
0720 chunk_start = rctx->src_sent;
0721
0722
0723
0724
0725
0726 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
0727 chunksize = nbytes_to_hash;
0728 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
0729 (chunksize > ctx->max_payload))
0730 chunksize = ctx->max_payload;
0731
0732
0733
0734
0735
0736
0737 if (!rctx->is_final) {
0738 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
0739 u16 new_len;
0740
0741 rem = chunksize % blocksize;
0742 if (rem) {
0743
0744 chunksize -= rem;
0745 if (chunksize == 0) {
0746
0747 new_len = rem - rctx->hash_carry_len;
0748 sg_copy_part_to_buf(req->src, dest, new_len,
0749 rctx->src_sent);
0750 rctx->hash_carry_len = rem;
0751 flow_log("Exiting with hash carry len: %u\n",
0752 rctx->hash_carry_len);
0753 packet_dump(" buf: ",
0754 rctx->hash_carry,
0755 rctx->hash_carry_len);
0756 return -EAGAIN;
0757 }
0758 }
0759 }
0760
0761
0762 local_nbuf = rctx->hash_carry_len;
0763 rctx->hash_carry_len = 0;
0764 if (local_nbuf)
0765 tx_frag_num++;
0766 new_data_len = chunksize - local_nbuf;
0767
0768
0769 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
0770 new_data_len);
0771
0772
0773 if (hash_parms.alg == HASH_ALG_AES)
0774 hash_parms.type = (enum hash_type)cipher_parms.type;
0775 else
0776 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
0777
0778 digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
0779 hash_parms.type);
0780 hash_parms.digestsize = digestsize;
0781
0782
0783 rctx->total_sent += chunksize;
0784
0785 rctx->src_sent += new_data_len;
0786
0787 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
0788 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
0789 hash_parms.mode,
0790 chunksize,
0791 blocksize);
0792
0793
0794
0795
0796
0797 if ((hash_parms.type == HASH_TYPE_UPDT) &&
0798 (hash_parms.alg != HASH_ALG_AES)) {
0799 hash_parms.key_buf = rctx->incr_hash;
0800 hash_parms.key_len = digestsize;
0801 }
0802
0803 atomic64_add(chunksize, &iproc_priv.bytes_out);
0804
0805 flow_log("%s() final: %u nbuf: %u ",
0806 __func__, rctx->is_final, local_nbuf);
0807
0808 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
0809 flow_log("max_payload infinite\n");
0810 else
0811 flow_log("max_payload %u\n", ctx->max_payload);
0812
0813 flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
0814
0815
0816 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
0817
0818 hash_parms.prebuf_len = local_nbuf;
0819 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
0820 BCM_HDR_LEN,
0821 &req_opts, &cipher_parms,
0822 &hash_parms, &aead_parms,
0823 new_data_len);
0824
0825 if (spu_hdr_len == 0) {
0826 pr_err("Failed to create SPU request header\n");
0827 return -EFAULT;
0828 }
0829
0830
0831
0832
0833
0834 data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
0835 db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
0836 0, 0, hash_parms.pad_len);
0837 if (spu->spu_tx_status_len())
0838 stat_pad_len = spu->spu_wordalign_padlen(db_size);
0839 if (stat_pad_len)
0840 rx_frag_num++;
0841 pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
0842 if (pad_len) {
0843 tx_frag_num++;
0844 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
0845 hash_parms.pad_len, ctx->auth.alg,
0846 ctx->auth.mode, rctx->total_sent,
0847 stat_pad_len);
0848 }
0849
0850 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
0851 spu_hdr_len);
0852 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
0853 flow_log("Data:\n");
0854 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
0855 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
0856
0857
0858
0859
0860
0861 memset(mssg, 0, sizeof(*mssg));
0862 mssg->type = BRCM_MESSAGE_SPU;
0863 mssg->ctx = rctx;
0864
0865
0866 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
0867 stat_pad_len);
0868 if (err)
0869 return err;
0870
0871
0872 tx_frag_num += rctx->src_nents;
0873 if (spu->spu_tx_status_len())
0874 tx_frag_num++;
0875 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
0876 local_nbuf, new_data_len, pad_len);
0877 if (err)
0878 return err;
0879
0880 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
0881 if (unlikely(err < 0))
0882 return err;
0883
0884 return -EINPROGRESS;
0885 }
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
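/*
 * spu_hmac_outer_hash() - Finish a software HMAC by hashing opad || inner
 * digest with a synchronous shash of the configured algorithm, writing the
 * result back into req->result.
 *
 * Return: 0 on success, negative errno otherwise
 */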
0897 static int spu_hmac_outer_hash(struct ahash_request *req,
0898 struct iproc_ctx_s *ctx)
0899 {
0900 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
0901 unsigned int blocksize =
0902 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
0903 int rc;
0904
0905 switch (ctx->auth.alg) {
0906 case HASH_ALG_MD5:
0907 rc = do_shash("md5", req->result, ctx->opad, blocksize,
0908 req->result, ctx->digestsize, NULL, 0);
0909 break;
0910 case HASH_ALG_SHA1:
0911 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
0912 req->result, ctx->digestsize, NULL, 0);
0913 break;
0914 case HASH_ALG_SHA224:
0915 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
0916 req->result, ctx->digestsize, NULL, 0);
0917 break;
0918 case HASH_ALG_SHA256:
0919 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
0920 req->result, ctx->digestsize, NULL, 0);
0921 break;
0922 case HASH_ALG_SHA384:
0923 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
0924 req->result, ctx->digestsize, NULL, 0);
0925 break;
0926 case HASH_ALG_SHA512:
0927 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
0928 req->result, ctx->digestsize, NULL, 0);
0929 break;
0930 default:
0931 pr_err("%s() Error: unknown hmac type\n", __func__);
0932 rc = -EINVAL;
0933 }
0934 return rc;
0935 }
0936
0937
0938
0939
0940
0941
0942
0943
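/*
 * ahash_req_done() - Copy the final digest into the request, byte-swap MD5
 * output from SPU-M, run the outer hash for software HMAC, and update the
 * operation counters.
 *
 * Return: 0 on success, negative errno otherwise
 */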
0944 static int ahash_req_done(struct iproc_reqctx_s *rctx)
0945 {
0946 struct spu_hw *spu = &iproc_priv.spu;
0947 struct crypto_async_request *areq = rctx->parent;
0948 struct ahash_request *req = ahash_request_cast(areq);
0949 struct iproc_ctx_s *ctx = rctx->ctx;
0950 int err;
0951
0952 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
0953
0954 if (spu->spu_type == SPU_TYPE_SPUM) {
0955
0956
0957
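/*
 * SPU-M returns MD5 digest words in the opposite byte order from what the
 * crypto API expects, so swap each 32-bit word of the result.
 */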
0958 if (ctx->auth.alg == HASH_ALG_MD5) {
0959 __swab32s((u32 *)req->result);
0960 __swab32s(((u32 *)req->result) + 1);
0961 __swab32s(((u32 *)req->result) + 2);
0962 __swab32s(((u32 *)req->result) + 3);
0963 __swab32s(((u32 *)req->result) + 4);
0964 }
0965 }
0966
0967 flow_dump(" digest ", req->result, ctx->digestsize);
0968
0969
0970 if (rctx->is_sw_hmac) {
0971 err = spu_hmac_outer_hash(req, ctx);
0972 if (err < 0)
0973 return err;
0974 flow_dump(" hmac: ", req->result, ctx->digestsize);
0975 }
0976
0977 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
0978 atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
0979 atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
0980 } else {
0981 atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
0982 atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
0983 }
0984
0985 return 0;
0986 }
0987
0988
0989
0990
0991
0992
0993
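/*
 * handle_ahash_resp() - Process an ahash SPU response: save the returned
 * digest as the incremental hash state for the next chunk, and finish the
 * request if this was the final chunk.
 */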
0994 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
0995 {
0996 struct iproc_ctx_s *ctx = rctx->ctx;
0997 struct crypto_async_request *areq = rctx->parent;
0998 struct ahash_request *req = ahash_request_cast(areq);
0999 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1000 unsigned int blocksize =
1001 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1002
1003
1004
1005
1006 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1007
1008 flow_log("%s() blocksize:%u digestsize:%u\n",
1009 __func__, blocksize, ctx->digestsize);
1010
1011 atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1012
1013 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1014 ahash_req_done(rctx);
1015 }
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
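/*
 * spu_aead_rx_sg_create() - Build the receive scatterlist for an AEAD SPU
 * response: response header, returned associated data (if any), payload
 * data, GCM/CCM alignment padding, digest, optional status padding, and
 * status word.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */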
1042 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1043 struct aead_request *req,
1044 struct iproc_reqctx_s *rctx,
1045 u8 rx_frag_num,
1046 unsigned int assoc_len,
1047 u32 ret_iv_len, unsigned int resp_len,
1048 unsigned int digestsize, u32 stat_pad_len)
1049 {
1050 struct spu_hw *spu = &iproc_priv.spu;
1051 struct scatterlist *sg;
1052 struct iproc_ctx_s *ctx = rctx->ctx;
1053 u32 datalen;
1054 u32 assoc_buf_len;
1055 u8 data_padlen = 0;
1056
1057 if (ctx->is_rfc4543) {
1058
1059 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1060 assoc_len + resp_len);
1061 assoc_buf_len = assoc_len;
1062 } else {
1063 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1064 resp_len);
1065 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1066 assoc_len, ret_iv_len,
1067 rctx->is_encrypt);
1068 }
1069
1070 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1071
1072 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1073 resp_len +
1074 data_padlen);
1075
1076 if (data_padlen)
1077
1078 rx_frag_num++;
1079
1080 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1081 rctx->gfp);
1082 if (!mssg->spu.dst)
1083 return -ENOMEM;
1084
1085 sg = mssg->spu.dst;
1086 sg_init_table(sg, rx_frag_num);
1087
1088
1089 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1090
1091 if (assoc_buf_len) {
1092
1093
1094
1095
1096 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1097 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1098 }
1099
1100 if (resp_len) {
1101
1102
1103
1104
1105 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1106 rctx->dst_nents, resp_len);
1107 if (datalen < resp_len) {
1108 pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1109 __func__, resp_len, datalen);
1110 return -EFAULT;
1111 }
1112 }
1113
1114
1115 if (data_padlen) {
1116 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1117 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1118 }
1119
1120
1121 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1122
1123 flow_log("stat_pad_len %u\n", stat_pad_len);
1124 if (stat_pad_len) {
1125 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1126 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1127 }
1128
1129 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1130 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1131
1132 return 0;
1133 }
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
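/*
 * spu_aead_tx_sg_create() - Build the transmit scatterlist for an AEAD SPU
 * request: BCM/SPU request header, associated data, IV, AAD padding, payload
 * data, request padding, the received ICV when it must be passed to the
 * hardware, and (if used) the transmit status field.
 *
 * Return: 0 on success, -ENOMEM or -EFAULT on failure
 */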
1162 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1163 struct iproc_reqctx_s *rctx,
1164 u8 tx_frag_num,
1165 u32 spu_hdr_len,
1166 struct scatterlist *assoc,
1167 unsigned int assoc_len,
1168 int assoc_nents,
1169 unsigned int aead_iv_len,
1170 unsigned int chunksize,
1171 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1172 {
1173 struct spu_hw *spu = &iproc_priv.spu;
1174 struct scatterlist *sg;
1175 struct scatterlist *assoc_sg = assoc;
1176 struct iproc_ctx_s *ctx = rctx->ctx;
1177 u32 datalen;
1178 u32 written;
1179 u32 assoc_offset = 0;
1180 u32 stat_len;
1181
1182 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1183 rctx->gfp);
1184 if (!mssg->spu.src)
1185 return -ENOMEM;
1186
1187 sg = mssg->spu.src;
1188 sg_init_table(sg, tx_frag_num);
1189
1190 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1191 BCM_HDR_LEN + spu_hdr_len);
1192
1193 if (assoc_len) {
1194
1195 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1196 assoc_nents, assoc_len);
1197 if (written < assoc_len) {
1198 pr_err("%s(): failed to copy assoc sg to mbox msg",
1199 __func__);
1200 return -EFAULT;
1201 }
1202 }
1203
1204 if (aead_iv_len)
1205 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1206
1207 if (aad_pad_len) {
1208 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1209 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1210 }
1211
1212 datalen = chunksize;
1213 if ((chunksize > ctx->digestsize) && incl_icv)
1214 datalen -= ctx->digestsize;
1215 if (datalen) {
1216
1217 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1218 rctx->src_nents, datalen);
1219 if (written < datalen) {
1220 pr_err("%s(): failed to copy src sg to mbox msg",
1221 __func__);
1222 return -EFAULT;
1223 }
1224 }
1225
1226 if (pad_len) {
1227 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1228 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1229 }
1230
1231 if (incl_icv)
1232 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1233
1234 stat_len = spu->spu_tx_status_len();
1235 if (stat_len) {
1236 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1237 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1238 }
1239 return 0;
1240 }
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
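/*
 * handle_aead_req() - Submit an AEAD request to the SPU. AEAD requests are
 * sent as a single chunk: set up cipher and hash parameters, adjust the
 * associated data and padding for the ESP, RFC4543, and CCM cases, build the
 * SPU request header and the rx/tx scatterlists, and send the message.
 *
 * Return: -EINPROGRESS if the request was sent successfully,
 *         negative errno otherwise
 */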
1259 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1260 {
1261 struct spu_hw *spu = &iproc_priv.spu;
1262 struct crypto_async_request *areq = rctx->parent;
1263 struct aead_request *req = container_of(areq,
1264 struct aead_request, base);
1265 struct iproc_ctx_s *ctx = rctx->ctx;
1266 int err;
1267 unsigned int chunksize;
1268 unsigned int resp_len;
1269 u32 spu_hdr_len;
1270 u32 db_size;
1271 u32 stat_pad_len;
1272 u32 pad_len;
1273 struct brcm_message *mssg;
1274 struct spu_request_opts req_opts;
1275 struct spu_cipher_parms cipher_parms;
1276 struct spu_hash_parms hash_parms;
1277 struct spu_aead_parms aead_parms;
1278 int assoc_nents = 0;
1279 bool incl_icv = false;
1280 unsigned int digestsize = ctx->digestsize;
1281
1282
1283
1284 u8 rx_frag_num = 2;
1285 u8 tx_frag_num = 1;
1286
1287
1288 chunksize = rctx->total_todo;
1289
1290 flow_log("%s: chunksize %u\n", __func__, chunksize);
1291
1292 memset(&req_opts, 0, sizeof(req_opts));
1293 memset(&hash_parms, 0, sizeof(hash_parms));
1294 memset(&aead_parms, 0, sizeof(aead_parms));
1295
1296 req_opts.is_inbound = !(rctx->is_encrypt);
1297 req_opts.auth_first = ctx->auth_first;
1298 req_opts.is_aead = true;
1299 req_opts.is_esp = ctx->is_esp;
1300
1301 cipher_parms.alg = ctx->cipher.alg;
1302 cipher_parms.mode = ctx->cipher.mode;
1303 cipher_parms.type = ctx->cipher_type;
1304 cipher_parms.key_buf = ctx->enckey;
1305 cipher_parms.key_len = ctx->enckeylen;
1306 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1307 cipher_parms.iv_len = rctx->iv_ctr_len;
1308
1309 hash_parms.alg = ctx->auth.alg;
1310 hash_parms.mode = ctx->auth.mode;
1311 hash_parms.type = HASH_TYPE_NONE;
1312 hash_parms.key_buf = (u8 *)ctx->authkey;
1313 hash_parms.key_len = ctx->authkeylen;
1314 hash_parms.digestsize = digestsize;
1315
1316 if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1317 (ctx->authkeylen < SHA224_DIGEST_SIZE))
1318 hash_parms.key_len = SHA224_DIGEST_SIZE;
1319
1320 aead_parms.assoc_size = req->assoclen;
1321 if (ctx->is_esp && !ctx->is_rfc4543) {
1322
1323
1324
1325
1326
1327 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1328
1329 if (rctx->is_encrypt) {
1330 aead_parms.return_iv = true;
1331 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1332 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1333 }
1334 } else {
1335 aead_parms.ret_iv_len = 0;
1336 }
1337
1338
1339
1340
1341
1342
1343
1344 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1345 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1346 if (aead_parms.assoc_size)
1347 assoc_nents = spu_sg_count(rctx->assoc, 0,
1348 aead_parms.assoc_size);
1349
1350 mssg = &rctx->mb_mssg;
1351
1352 rctx->total_sent = chunksize;
1353 rctx->src_sent = chunksize;
1354 if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1355 aead_parms.assoc_size,
1356 aead_parms.ret_iv_len,
1357 rctx->is_encrypt))
1358 rx_frag_num++;
1359
1360 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1361 rctx->iv_ctr_len);
1362
1363 if (ctx->auth.alg == HASH_ALG_AES)
1364 hash_parms.type = (enum hash_type)ctx->cipher_type;
1365
1366
1367 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1368 aead_parms.assoc_size);
1369
1370
1371 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1372 chunksize);
1373
1374 if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1375
1376
1377
1378
1379 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1380 ctx->cipher.mode,
1381 aead_parms.assoc_size + 2);
1382
1383
1384
1385
1386
1387 if (!rctx->is_encrypt)
1388 aead_parms.data_pad_len =
1389 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1390 chunksize - digestsize);
1391
1392
1393 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1394 chunksize, rctx->is_encrypt,
1395 ctx->is_esp);
1396 }
1397
1398 if (ctx->is_rfc4543) {
1399
1400
1401
1402
1403 aead_parms.aad_pad_len = 0;
1404 if (!rctx->is_encrypt)
1405 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1406 ctx->cipher.mode,
1407 aead_parms.assoc_size + chunksize -
1408 digestsize);
1409 else
1410 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1411 ctx->cipher.mode,
1412 aead_parms.assoc_size + chunksize);
1413
1414 req_opts.is_rfc4543 = true;
1415 }
1416
1417 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1418 incl_icv = true;
1419 tx_frag_num++;
1420
1421 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1422 req->assoclen + rctx->total_sent -
1423 digestsize);
1424 }
1425
1426 atomic64_add(chunksize, &iproc_priv.bytes_out);
1427
1428 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1429
1430
1431 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1432
1433 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1434 BCM_HDR_LEN, &req_opts,
1435 &cipher_parms, &hash_parms,
1436 &aead_parms, chunksize);
1437
1438
1439 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1440 chunksize, aead_parms.aad_pad_len,
1441 aead_parms.data_pad_len, 0);
1442
1443 stat_pad_len = spu->spu_wordalign_padlen(db_size);
1444
1445 if (stat_pad_len)
1446 rx_frag_num++;
1447 pad_len = aead_parms.data_pad_len + stat_pad_len;
1448 if (pad_len) {
1449 tx_frag_num++;
1450 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1451 aead_parms.data_pad_len, 0,
1452 ctx->auth.alg, ctx->auth.mode,
1453 rctx->total_sent, stat_pad_len);
1454 }
1455
1456 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1457 spu_hdr_len);
1458 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1459 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1460 packet_log("BD:\n");
1461 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1462 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1463
1464
1465
1466
1467
1468 memset(mssg, 0, sizeof(*mssg));
1469 mssg->type = BRCM_MESSAGE_SPU;
1470 mssg->ctx = rctx;
1471
1472
1473 rx_frag_num += rctx->dst_nents;
1474 resp_len = chunksize;
1475
1476
1477
1478
1479
1480
1481 rx_frag_num++;
1482
1483 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1484 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1485
1486
1487
1488
1489 resp_len -= ctx->digestsize;
1490 if (resp_len == 0)
1491
1492 rx_frag_num -= rctx->dst_nents;
1493 }
1494
1495 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1496 aead_parms.assoc_size,
1497 aead_parms.ret_iv_len, resp_len, digestsize,
1498 stat_pad_len);
1499 if (err)
1500 return err;
1501
1502
1503 tx_frag_num += rctx->src_nents;
1504 tx_frag_num += assoc_nents;
1505 if (aead_parms.aad_pad_len)
1506 tx_frag_num++;
1507 if (aead_parms.iv_len)
1508 tx_frag_num++;
1509 if (spu->spu_tx_status_len())
1510 tx_frag_num++;
1511 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1512 rctx->assoc, aead_parms.assoc_size,
1513 assoc_nents, aead_parms.iv_len, chunksize,
1514 aead_parms.aad_pad_len, pad_len, incl_icv);
1515 if (err)
1516 return err;
1517
1518 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1519 if (unlikely(err < 0))
1520 return err;
1521
1522 return -EINPROGRESS;
1523 }
1524
1525
1526
1527
1528
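/*
 * handle_aead_resp() - Process an AEAD SPU response: dump the returned
 * associated data, copy the computed ICV into the destination sg for encrypt
 * operations, and update the AEAD operation counters.
 */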
1529 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1530 {
1531 struct spu_hw *spu = &iproc_priv.spu;
1532 struct crypto_async_request *areq = rctx->parent;
1533 struct aead_request *req = container_of(areq,
1534 struct aead_request, base);
1535 struct iproc_ctx_s *ctx = rctx->ctx;
1536 u32 payload_len;
1537 unsigned int icv_offset;
1538 u32 result_len;
1539
1540
1541 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1542 flow_log("payload_len %u\n", payload_len);
1543
1544
1545 atomic64_add(payload_len, &iproc_priv.bytes_in);
1546
1547 if (req->assoclen)
1548 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1549 req->assoclen);
1550
1551
1552
1553
1554
1555
1556 result_len = req->cryptlen;
1557 if (rctx->is_encrypt) {
1558 icv_offset = req->assoclen + rctx->total_sent;
1559 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1560 flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1561 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1562 ctx->digestsize, icv_offset);
1563 result_len += ctx->digestsize;
1564 }
1565
1566 packet_log("response data: ");
1567 dump_sg(req->dst, req->assoclen, result_len);
1568
1569 atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1570 if (ctx->cipher.alg == CIPHER_ALG_AES) {
1571 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1572 atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1573 else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1574 atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1575 else
1576 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1577 } else {
1578 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1579 }
1580 }
1581
1582
1583
1584
1585
1586
1587
1588
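/*
 * spu_chunk_cleanup() - Free the scatterlists allocated for the previous
 * mailbox message and clear the message so it can be reused for the next
 * chunk.
 */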
1589 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1590 {
1591
1592 struct brcm_message *mssg = &rctx->mb_mssg;
1593
1594 kfree(mssg->spu.src);
1595 kfree(mssg->spu.dst);
1596 memset(mssg, 0, sizeof(struct brcm_message));
1597 }
1598
1599
1600
1601
1602
1603
1604
1605
1606
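/*
 * finish_req() - Clean up the current chunk and complete the crypto API
 * request with the given error code.
 */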
1607 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1608 {
1609 struct crypto_async_request *areq = rctx->parent;
1610
1611 flow_log("%s() err:%d\n\n", __func__, err);
1612
1613
1614 spu_chunk_cleanup(rctx);
1615
1616 if (areq)
1617 areq->complete(areq, err);
1618 }
1619
1620
1621
1622
1623
1624
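/*
 * spu_rx_callback() - Mailbox callback run when the SPU returns a response.
 * Checks the response status, processes the response according to the
 * request type, and either submits the next chunk or completes the request.
 */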
1625 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1626 {
1627 struct spu_hw *spu = &iproc_priv.spu;
1628 struct brcm_message *mssg = msg;
1629 struct iproc_reqctx_s *rctx;
1630 int err;
1631
1632 rctx = mssg->ctx;
1633 if (unlikely(!rctx)) {
1634
1635 pr_err("%s(): no request context", __func__);
1636 err = -EFAULT;
1637 goto cb_finish;
1638 }
1639
1640
1641 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1642 if (err != 0) {
1643 if (err == SPU_INVALID_ICV)
1644 atomic_inc(&iproc_priv.bad_icv);
1645 err = -EBADMSG;
1646 goto cb_finish;
1647 }
1648
1649
1650 switch (rctx->ctx->alg->type) {
1651 case CRYPTO_ALG_TYPE_SKCIPHER:
1652 handle_skcipher_resp(rctx);
1653 break;
1654 case CRYPTO_ALG_TYPE_AHASH:
1655 handle_ahash_resp(rctx);
1656 break;
1657 case CRYPTO_ALG_TYPE_AEAD:
1658 handle_aead_resp(rctx);
1659 break;
1660 default:
1661 err = -EINVAL;
1662 goto cb_finish;
1663 }
1664
1665
1666
1667
1668
1669 if (rctx->total_sent < rctx->total_todo) {
1670
1671 spu_chunk_cleanup(rctx);
1672
1673 switch (rctx->ctx->alg->type) {
1674 case CRYPTO_ALG_TYPE_SKCIPHER:
1675 err = handle_skcipher_req(rctx);
1676 break;
1677 case CRYPTO_ALG_TYPE_AHASH:
1678 err = handle_ahash_req(rctx);
1679 if (err == -EAGAIN)
1680
1681
1682
1683
1684 err = 0;
1685 break;
1686 case CRYPTO_ALG_TYPE_AEAD:
1687 err = handle_aead_req(rctx);
1688 break;
1689 default:
1690 err = -EINVAL;
1691 }
1692
1693 if (err == -EINPROGRESS)
1694
1695 return;
1696 }
1697
1698 cb_finish:
1699 finish_req(rctx, err);
1700 }
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
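/*
 * skcipher_enqueue() - Set up the request context for an skcipher request,
 * copy the IV for modes that use one, and submit the first chunk to the SPU.
 *
 * Return: -EINPROGRESS if the request was accepted,
 *         negative errno otherwise
 */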
1713 static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
1714 {
1715 struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1716 struct iproc_ctx_s *ctx =
1717 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1718 int err;
1719
1720 flow_log("%s() enc:%u\n", __func__, encrypt);
1721
1722 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1723 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1724 rctx->parent = &req->base;
1725 rctx->is_encrypt = encrypt;
1726 rctx->bd_suppress = false;
1727 rctx->total_todo = req->cryptlen;
1728 rctx->src_sent = 0;
1729 rctx->total_sent = 0;
1730 rctx->total_received = 0;
1731 rctx->ctx = ctx;
1732
1733
1734 rctx->src_sg = req->src;
1735 rctx->src_nents = 0;
1736 rctx->src_skip = 0;
1737 rctx->dst_sg = req->dst;
1738 rctx->dst_nents = 0;
1739 rctx->dst_skip = 0;
1740
1741 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1742 ctx->cipher.mode == CIPHER_MODE_CTR ||
1743 ctx->cipher.mode == CIPHER_MODE_OFB ||
1744 ctx->cipher.mode == CIPHER_MODE_XTS ||
1745 ctx->cipher.mode == CIPHER_MODE_GCM ||
1746 ctx->cipher.mode == CIPHER_MODE_CCM) {
1747 rctx->iv_ctr_len =
1748 crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
1749 memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1750 } else {
1751 rctx->iv_ctr_len = 0;
1752 }
1753
1754
1755 rctx->chan_idx = select_channel();
1756 err = handle_skcipher_req(rctx);
1757 if (err != -EINPROGRESS)
1758
1759 spu_chunk_cleanup(rctx);
1760
1761 return err;
1762 }
1763
1764 static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
1765 unsigned int keylen)
1766 {
1767 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1768 int err;
1769
1770 err = verify_skcipher_des_key(cipher, key);
1771 if (err)
1772 return err;
1773
1774 ctx->cipher_type = CIPHER_TYPE_DES;
1775 return 0;
1776 }
1777
1778 static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1779 unsigned int keylen)
1780 {
1781 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1782 int err;
1783
1784 err = verify_skcipher_des3_key(cipher, key);
1785 if (err)
1786 return err;
1787
1788 ctx->cipher_type = CIPHER_TYPE_3DES;
1789 return 0;
1790 }
1791
1792 static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1793 unsigned int keylen)
1794 {
1795 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1796
1797 if (ctx->cipher.mode == CIPHER_MODE_XTS)
1798
1799 keylen = keylen / 2;
1800
1801 switch (keylen) {
1802 case AES_KEYSIZE_128:
1803 ctx->cipher_type = CIPHER_TYPE_AES128;
1804 break;
1805 case AES_KEYSIZE_192:
1806 ctx->cipher_type = CIPHER_TYPE_AES192;
1807 break;
1808 case AES_KEYSIZE_256:
1809 ctx->cipher_type = CIPHER_TYPE_AES256;
1810 break;
1811 default:
1812 return -EINVAL;
1813 }
1814 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1815 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1816 return 0;
1817 }
1818
1819 static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
1820 unsigned int keylen)
1821 {
1822 struct spu_hw *spu = &iproc_priv.spu;
1823 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1824 struct spu_cipher_parms cipher_parms;
1825 u32 alloc_len = 0;
1826 int err;
1827
1828 flow_log("skcipher_setkey() keylen: %d\n", keylen);
1829 flow_dump(" key: ", key, keylen);
1830
1831 switch (ctx->cipher.alg) {
1832 case CIPHER_ALG_DES:
1833 err = des_setkey(cipher, key, keylen);
1834 break;
1835 case CIPHER_ALG_3DES:
1836 err = threedes_setkey(cipher, key, keylen);
1837 break;
1838 case CIPHER_ALG_AES:
1839 err = aes_setkey(cipher, key, keylen);
1840 break;
1841 default:
1842 pr_err("%s() Error: unknown cipher alg\n", __func__);
1843 err = -EINVAL;
1844 }
1845 if (err)
1846 return err;
1847
1848 memcpy(ctx->enckey, key, keylen);
1849 ctx->enckeylen = keylen;
1850
1851
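/*
 * For AES-XTS, store the two half-keys with their order swapped, since the
 * SPU expects them in the opposite order from the crypto API.
 */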
1852 if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1853 (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1854 unsigned int xts_keylen = keylen / 2;
1855
1856 memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1857 memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1858 }
1859
1860 if (spu->spu_type == SPU_TYPE_SPUM)
1861 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1862 else if (spu->spu_type == SPU_TYPE_SPU2)
1863 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1864 memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1865 cipher_parms.iv_buf = NULL;
1866 cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
1867 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1868
1869 cipher_parms.alg = ctx->cipher.alg;
1870 cipher_parms.mode = ctx->cipher.mode;
1871 cipher_parms.type = ctx->cipher_type;
1872 cipher_parms.key_buf = ctx->enckey;
1873 cipher_parms.key_len = ctx->enckeylen;
1874
1875
1876 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1877 ctx->spu_req_hdr_len =
1878 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1879 &cipher_parms);
1880
1881 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1882 ctx->enckeylen,
1883 false);
1884
1885 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1886
1887 return 0;
1888 }
1889
1890 static int skcipher_encrypt(struct skcipher_request *req)
1891 {
1892 flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
1893
1894 return skcipher_enqueue(req, true);
1895 }
1896
1897 static int skcipher_decrypt(struct skcipher_request *req)
1898 {
1899 flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
1900 return skcipher_enqueue(req, false);
1901 }
1902
1903 static int ahash_enqueue(struct ahash_request *req)
1904 {
1905 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1906 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1907 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1908 int err;
1909 const char *alg_name;
1910
1911 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1912
1913 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1914 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1915 rctx->parent = &req->base;
1916 rctx->ctx = ctx;
1917 rctx->bd_suppress = true;
1918 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1919
1920
1921 rctx->src_sg = req->src;
1922 rctx->src_skip = 0;
1923 rctx->src_nents = 0;
1924 rctx->dst_sg = NULL;
1925 rctx->dst_skip = 0;
1926 rctx->dst_nents = 0;
1927
1928
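/* SPU2 hardware cannot hash zero-length data, so do it in software */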
1929 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1930 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1931 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
1932 flow_log("Doing %sfinal %s zero-len hash request in software\n",
1933 rctx->is_final ? "" : "non-", alg_name);
1934 err = do_shash((unsigned char *)alg_name, req->result,
1935 NULL, 0, NULL, 0, ctx->authkey,
1936 ctx->authkeylen);
1937 if (err < 0)
1938 flow_log("Hash request failed with error %d\n", err);
1939 return err;
1940 }
1941
1942 rctx->chan_idx = select_channel();
1943
1944 err = handle_ahash_req(rctx);
1945 if (err != -EINPROGRESS)
1946
1947 spu_chunk_cleanup(rctx);
1948
1949 if (err == -EAGAIN)
1950
1951
1952
1953
1954 err = 0;
1955
1956 return err;
1957 }
1958
1959 static int __ahash_init(struct ahash_request *req)
1960 {
1961 struct spu_hw *spu = &iproc_priv.spu;
1962 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1963 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1964 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1965
1966 flow_log("%s()\n", __func__);
1967
1968
1969 rctx->hash_carry_len = 0;
1970 rctx->is_final = 0;
1971
1972 rctx->total_todo = 0;
1973 rctx->src_sent = 0;
1974 rctx->total_sent = 0;
1975 rctx->total_received = 0;
1976
1977 ctx->digestsize = crypto_ahash_digestsize(tfm);
1978
1979 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
1980
1981 rctx->is_sw_hmac = false;
1982
1983 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
1984 true);
1985
1986 return 0;
1987 }
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
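/*
 * spu_no_incr_hash() - Return true if the hardware cannot do incremental
 * hashing for this context (all of SPU2, and AES-XCBC on any SPU), in which
 * case hash requests are handled with a synchronous software shash instead.
 */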
2002 static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2003 {
2004 struct spu_hw *spu = &iproc_priv.spu;
2005
2006 if (spu->spu_type == SPU_TYPE_SPU2)
2007 return true;
2008
2009 if ((ctx->auth.alg == HASH_ALG_AES) &&
2010 (ctx->auth.mode == HASH_MODE_XCBC))
2011 return true;
2012
2013
2014 return false;
2015 }
2016
2017 static int ahash_init(struct ahash_request *req)
2018 {
2019 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2020 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2021 const char *alg_name;
2022 struct crypto_shash *hash;
2023 int ret;
2024 gfp_t gfp;
2025
2026 if (spu_no_incr_hash(ctx)) {
2027
2028
2029
2030
2031
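/*
 * The hardware cannot do incremental hashing for this context, so allocate
 * a synchronous shash of the same algorithm and handle the request in
 * software.
 */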
2032 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2033 hash = crypto_alloc_shash(alg_name, 0, 0);
2034 if (IS_ERR(hash)) {
2035 ret = PTR_ERR(hash);
2036 goto err;
2037 }
2038
2039 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2040 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2041 ctx->shash = kmalloc(sizeof(*ctx->shash) +
2042 crypto_shash_descsize(hash), gfp);
2043 if (!ctx->shash) {
2044 ret = -ENOMEM;
2045 goto err_hash;
2046 }
2047 ctx->shash->tfm = hash;
2048
2049
2050 if (ctx->authkeylen > 0) {
2051 ret = crypto_shash_setkey(hash, ctx->authkey,
2052 ctx->authkeylen);
2053 if (ret)
2054 goto err_shash;
2055 }
2056
2057
2058 ret = crypto_shash_init(ctx->shash);
2059 if (ret)
2060 goto err_shash;
2061 } else {
2062
2063 ret = __ahash_init(req);
2064 }
2065
2066 return ret;
2067
2068 err_shash:
2069 kfree(ctx->shash);
2070 err_hash:
2071 crypto_free_shash(hash);
2072 err:
2073 return ret;
2074 }
2075
2076 static int __ahash_update(struct ahash_request *req)
2077 {
2078 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2079
2080 flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2081
2082 if (!req->nbytes)
2083 return 0;
2084 rctx->total_todo += req->nbytes;
2085 rctx->src_sent = 0;
2086
2087 return ahash_enqueue(req);
2088 }
2089
2090 static int ahash_update(struct ahash_request *req)
2091 {
2092 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2093 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2094 u8 *tmpbuf;
2095 int ret;
2096 int nents;
2097 gfp_t gfp;
2098
2099 if (spu_no_incr_hash(ctx)) {
2100
2101
2102
2103
2104
2105 if (req->src)
2106 nents = sg_nents(req->src);
2107 else
2108 return -EINVAL;
2109
2110
2111 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2112 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2113 tmpbuf = kmalloc(req->nbytes, gfp);
2114 if (!tmpbuf)
2115 return -ENOMEM;
2116
2117 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2118 req->nbytes) {
2119 kfree(tmpbuf);
2120 return -EINVAL;
2121 }
2122
2123
2124 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2125 kfree(tmpbuf);
2126 } else {
2127
2128 ret = __ahash_update(req);
2129 }
2130
2131 return ret;
2132 }
2133
2134 static int __ahash_final(struct ahash_request *req)
2135 {
2136 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2137
2138 flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2139
2140 rctx->is_final = 1;
2141
2142 return ahash_enqueue(req);
2143 }
2144
2145 static int ahash_final(struct ahash_request *req)
2146 {
2147 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2148 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2149 int ret;
2150
2151 if (spu_no_incr_hash(ctx)) {
2152
2153
2154
2155
2156
2157 ret = crypto_shash_final(ctx->shash, req->result);
2158
2159
2160 crypto_free_shash(ctx->shash->tfm);
2161 kfree(ctx->shash);
2162
2163 } else {
2164
2165 ret = __ahash_final(req);
2166 }
2167
2168 return ret;
2169 }
2170
2171 static int __ahash_finup(struct ahash_request *req)
2172 {
2173 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2174
2175 flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2176
2177 rctx->total_todo += req->nbytes;
2178 rctx->src_sent = 0;
2179 rctx->is_final = 1;
2180
2181 return ahash_enqueue(req);
2182 }
2183
2184 static int ahash_finup(struct ahash_request *req)
2185 {
2186 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2187 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2188 u8 *tmpbuf;
2189 int ret;
2190 int nents;
2191 gfp_t gfp;
2192
2193 if (spu_no_incr_hash(ctx)) {
2194
2195
2196
2197
2198
2199 if (req->src) {
2200 nents = sg_nents(req->src);
2201 } else {
2202 ret = -EINVAL;
2203 goto ahash_finup_exit;
2204 }
2205
2206
2207 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2208 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2209 tmpbuf = kmalloc(req->nbytes, gfp);
2210 if (!tmpbuf) {
2211 ret = -ENOMEM;
2212 goto ahash_finup_exit;
2213 }
2214
2215 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2216 req->nbytes) {
2217 ret = -EINVAL;
2218 goto ahash_finup_free;
2219 }
2220
2221
2222 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2223 req->result);
2224 } else {
2225
2226 return __ahash_finup(req);
2227 }
2228 ahash_finup_free:
2229 kfree(tmpbuf);
2230
2231 ahash_finup_exit:
2232
2233 crypto_free_shash(ctx->shash->tfm);
2234 kfree(ctx->shash);
2235 return ret;
2236 }
2237
2238 static int ahash_digest(struct ahash_request *req)
2239 {
2240 int err;
2241
2242 flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2243
2244
2245 err = __ahash_init(req);
2246 if (!err)
2247 err = __ahash_finup(req);
2248
2249 return err;
2250 }
2251
2252 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2253 unsigned int keylen)
2254 {
2255 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2256
2257 flow_log("%s() ahash:%p key:%p keylen:%u\n",
2258 __func__, ahash, key, keylen);
2259 flow_dump(" key: ", key, keylen);
2260
2261 if (ctx->auth.alg == HASH_ALG_AES) {
2262 switch (keylen) {
2263 case AES_KEYSIZE_128:
2264 ctx->cipher_type = CIPHER_TYPE_AES128;
2265 break;
2266 case AES_KEYSIZE_192:
2267 ctx->cipher_type = CIPHER_TYPE_AES192;
2268 break;
2269 case AES_KEYSIZE_256:
2270 ctx->cipher_type = CIPHER_TYPE_AES256;
2271 break;
2272 default:
2273 pr_err("%s() Error: Invalid key length\n", __func__);
2274 return -EINVAL;
2275 }
2276 } else {
2277 pr_err("%s() Error: unknown hash alg\n", __func__);
2278 return -EINVAL;
2279 }
2280 memcpy(ctx->authkey, key, keylen);
2281 ctx->authkeylen = keylen;
2282
2283 return 0;
2284 }
2285
2286 static int ahash_export(struct ahash_request *req, void *out)
2287 {
2288 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2289 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2290
2291 spu_exp->total_todo = rctx->total_todo;
2292 spu_exp->total_sent = rctx->total_sent;
2293 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2294 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2295 spu_exp->hash_carry_len = rctx->hash_carry_len;
2296 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2297
2298 return 0;
2299 }
2300
2301 static int ahash_import(struct ahash_request *req, const void *in)
2302 {
2303 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2304 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2305
2306 rctx->total_todo = spu_exp->total_todo;
2307 rctx->total_sent = spu_exp->total_sent;
2308 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2309 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2310 rctx->hash_carry_len = spu_exp->hash_carry_len;
2311 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2312
2313 return 0;
2314 }
2315
2316 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2317 unsigned int keylen)
2318 {
2319 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2320 unsigned int blocksize =
2321 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2322 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2323 unsigned int index;
2324 int rc;
2325
2326 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2327 __func__, ahash, key, keylen, blocksize, digestsize);
2328 flow_dump(" key: ", key, keylen);
2329
2330 if (keylen > blocksize) {
2331 switch (ctx->auth.alg) {
2332 case HASH_ALG_MD5:
2333 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2334 0, NULL, 0);
2335 break;
2336 case HASH_ALG_SHA1:
2337 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2338 0, NULL, 0);
2339 break;
2340 case HASH_ALG_SHA224:
2341 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2342 0, NULL, 0);
2343 break;
2344 case HASH_ALG_SHA256:
2345 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2346 0, NULL, 0);
2347 break;
2348 case HASH_ALG_SHA384:
2349 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2350 0, NULL, 0);
2351 break;
2352 case HASH_ALG_SHA512:
2353 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2354 0, NULL, 0);
2355 break;
2356 case HASH_ALG_SHA3_224:
2357 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2358 NULL, 0, NULL, 0);
2359 break;
2360 case HASH_ALG_SHA3_256:
2361 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2362 NULL, 0, NULL, 0);
2363 break;
2364 case HASH_ALG_SHA3_384:
2365 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2366 NULL, 0, NULL, 0);
2367 break;
2368 case HASH_ALG_SHA3_512:
2369 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2370 NULL, 0, NULL, 0);
2371 break;
2372 default:
2373 pr_err("%s() Error: unknown hash alg\n", __func__);
2374 return -EINVAL;
2375 }
2376 if (rc < 0) {
2377 pr_err("%s() Error %d computing shash for %s\n",
2378 __func__, rc, hash_alg_name[ctx->auth.alg]);
2379 return rc;
2380 }
2381 ctx->authkeylen = digestsize;
2382
2383 flow_log(" keylen > blocksize... hashed to digest size\n");
2384 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2385 } else {
2386 memcpy(ctx->authkey, key, keylen);
2387 ctx->authkeylen = keylen;
2388 }
2389
2390
2391
2392
2393
2394
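/*
 * For SPU-M, derive the ipad and opad blocks from the key here so that HMAC
 * can be performed as a plain hash with the pads supplied from software.
 */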
2395 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2396 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2397 memset(ctx->ipad + ctx->authkeylen, 0,
2398 blocksize - ctx->authkeylen);
2399 ctx->authkeylen = 0;
2400 memcpy(ctx->opad, ctx->ipad, blocksize);
2401
2402 for (index = 0; index < blocksize; index++) {
2403 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2404 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2405 }
2406
2407 flow_dump(" ipad: ", ctx->ipad, blocksize);
2408 flow_dump(" opad: ", ctx->opad, blocksize);
2409 }
2410 ctx->digestsize = digestsize;
2411 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2412
2413 return 0;
2414 }
2415
2416 static int ahash_hmac_init(struct ahash_request *req)
2417 {
2418 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2419 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2420 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2421 unsigned int blocksize =
2422 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2423
2424 flow_log("ahash_hmac_init()\n");
2425
2426
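/* Start from a freshly initialized hash context */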
2427 ahash_init(req);
2428
2429 if (!spu_no_incr_hash(ctx)) {
2430
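/*
 * The SPU supports incremental hashing here, so run HMAC in software by
 * seeding the inner hash with the precomputed ipad block.
 */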
2431 rctx->is_sw_hmac = true;
2432 ctx->auth.mode = HASH_MODE_HASH;
2433
2434 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2435 rctx->hash_carry_len = blocksize;
2436 rctx->total_todo += blocksize;
2437 }
2438
2439 return 0;
2440 }
2441
2442 static int ahash_hmac_update(struct ahash_request *req)
2443 {
2444 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2445
2446 if (!req->nbytes)
2447 return 0;
2448
2449 return ahash_update(req);
2450 }
2451
2452 static int ahash_hmac_final(struct ahash_request *req)
2453 {
2454 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2455
2456 return ahash_final(req);
2457 }
2458
2459 static int ahash_hmac_finup(struct ahash_request *req)
2460 {
2461 flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
2462
2463 return ahash_finup(req);
2464 }
2465
2466 static int ahash_hmac_digest(struct ahash_request *req)
2467 {
2468 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2469 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2470 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2471 unsigned int blocksize =
2472 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2473
2474 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2475
2476
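/* One-shot digest: start the request from a clean hash state */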
2477 __ahash_init(req);
2478
2479 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2480
2481
2482
2483
2484
2485
2486
2487
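/*
 * SPU2 can perform the entire HMAC operation in hardware, so no software
 * ipad/opad handling is needed for a one-shot digest.
 */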
2488 rctx->is_sw_hmac = false;
2489 ctx->auth.mode = HASH_MODE_HMAC;
2490 } else {
2491 rctx->is_sw_hmac = true;
2492 ctx->auth.mode = HASH_MODE_HASH;
2493
2494 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2495 rctx->hash_carry_len = blocksize;
2496 rctx->total_todo += blocksize;
2497 }
2498
2499 return __ahash_finup(req);
2500 }
2501
2502
2503
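/* AEAD request processing */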
2504 static int aead_need_fallback(struct aead_request *req)
2505 {
2506 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2507 struct spu_hw *spu = &iproc_priv.spu;
2508 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2509 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2510 u32 payload_len;
2511
2512
2513
2514
2515
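/*
 * GCM and CCM with no associated data and no payload (or, on decrypt,
 * nothing but the digest) cannot be handled by the SPU; use the fallback.
 */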
2516 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2517 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2518 (req->assoclen == 0)) {
2519 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2520 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2521 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2522 return 1;
2523 }
2524 }
2525
2526
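/* SPU-M hardware only supports CCM digest sizes of 8, 12, or 16 bytes */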
2527 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2528 (spu->spu_type == SPU_TYPE_SPUM) &&
2529 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2530 (ctx->digestsize != 16)) {
2531 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2532 __func__, ctx->digestsize);
2533 return 1;
2534 }
2535
2536
2537
2538
2539
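/*
 * SPU-M on Northstar Plus cannot handle CCM with zero-length associated
 * data; fall back to software in that case.
 */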
2540 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2541 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2542 (req->assoclen == 0)) {
2543 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2544 __func__);
2545 return 1;
2546 }
2547
2548
2549
2550
2551
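/*
 * RFC4106/RFC4543 GCM can only be offloaded when the associated data is
 * 16 or 20 bytes long; otherwise use the fallback.
 */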
2552 if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2553 ctx->cipher.alg == CIPHER_ALG_AES &&
2554 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2555 req->assoclen != 16 && req->assoclen != 20) {
2556 flow_log("RFC4106/RFC4543 needs fallback for assoclen"
2557 " other than 16 or 20 bytes\n");
2558 return 1;
2559 }
2560
2561 payload_len = req->cryptlen;
2562 if (spu->spu_type == SPU_TYPE_SPUM)
2563 payload_len += req->assoclen;
2564
2565 flow_log("%s() payload len: %u\n", __func__, payload_len);
2566
2567 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2568 return 0;
2569 else
2570 return payload_len > ctx->max_payload;
2571 }
2572
2573 static void aead_complete(struct crypto_async_request *areq, int err)
2574 {
2575 struct aead_request *req =
2576 container_of(areq, struct aead_request, base);
2577 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2578 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2579
2580 flow_log("%s() err:%d\n", __func__, err);
2581
2582 areq->tfm = crypto_aead_tfm(aead);
2583
2584 areq->complete = rctx->old_complete;
2585 areq->data = rctx->old_data;
2586
2587 areq->complete(areq, err);
2588 }
2589
2590 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2591 {
2592 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2593 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2594 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2595 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2596 int err;
2597 u32 req_flags;
2598
2599 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2600
2601 if (ctx->fallback_cipher) {
2602
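/* Remember the original tfm and switch the request to the fallback transform */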
2603 rctx->old_tfm = tfm;
2604 aead_request_set_tfm(req, ctx->fallback_cipher);
2605
2606
2607
2608
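/*
 * Save the original completion callback and context; aead_complete()
 * restores them (and the original tfm) when the fallback finishes
 * asynchronously.
 */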
2609 rctx->old_complete = req->base.complete;
2610 rctx->old_data = req->base.data;
2611 req_flags = aead_request_flags(req);
2612 aead_request_set_callback(req, req_flags, aead_complete, req);
2613 err = is_encrypt ? crypto_aead_encrypt(req) :
2614 crypto_aead_decrypt(req);
2615
2616 if (err == 0) {
2617
2618
2619
2620
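/*
 * The fallback completed synchronously (no -EINPROGRESS), so restore
 * the original callback and tfm here.
 */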
2621 aead_request_set_callback(req, req_flags,
2622 rctx->old_complete, req);
2623 req->base.data = rctx->old_data;
2624 aead_request_set_tfm(req, aead);
2625 flow_log("%s() fallback completed successfully\n\n",
2626 __func__);
2627 }
2628 } else {
2629 err = -EINVAL;
2630 }
2631
2632 return err;
2633 }
2634
2635 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2636 {
2637 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2638 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2639 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2640 int err;
2641
2642 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2643
2644 if (req->assoclen > MAX_ASSOC_SIZE) {
2645 pr_err("%s() Error: associated data too long (%u > %u bytes)\n",
2646 __func__, req->assoclen, MAX_ASSOC_SIZE);
2648 return -EINVAL;
2649 }
2650
2651 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2652 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2653 rctx->parent = &req->base;
2654 rctx->is_encrypt = is_encrypt;
2655 rctx->bd_suppress = false;
2656 rctx->total_todo = req->cryptlen;
2657 rctx->src_sent = 0;
2658 rctx->total_sent = 0;
2659 rctx->total_received = 0;
2660 rctx->is_sw_hmac = false;
2661 rctx->ctx = ctx;
2662 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2663
2664
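/* The associated data is at the start of the source scatterlist */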
2665 rctx->assoc = req->src;
2666
2667
2668
2669
2670
2671
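/*
 * Position the source scatterlist pointer just past the associated data,
 * recording the sg entry and offset where the payload begins.
 */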
2672 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2673 &rctx->src_skip) < 0) {
2674 pr_err("%s() Error: Unable to find start of src data\n",
2675 __func__);
2676 return -EINVAL;
2677 }
2678
2679 rctx->src_nents = 0;
2680 rctx->dst_nents = 0;
2681 if (req->dst == req->src) {
2682 rctx->dst_sg = rctx->src_sg;
2683 rctx->dst_skip = rctx->src_skip;
2684 } else {
2685
2686
2687
2688
2689
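/*
 * For non-in-place requests, position the destination scatterlist pointer
 * past the associated data as well.
 */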
2690 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2691 &rctx->dst_skip) < 0) {
2692 pr_err("%s() Error: Unable to find start of dst data\n",
2693 __func__);
2694 return -EINVAL;
2695 }
2696 }
2697
2698 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2699 ctx->cipher.mode == CIPHER_MODE_CTR ||
2700 ctx->cipher.mode == CIPHER_MODE_OFB ||
2701 ctx->cipher.mode == CIPHER_MODE_XTS ||
2702 ctx->cipher.mode == CIPHER_MODE_GCM) {
2703 rctx->iv_ctr_len =
2704 ctx->salt_len +
2705 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2706 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2707 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2708 } else {
2709 rctx->iv_ctr_len = 0;
2710 }
2711
2712 rctx->hash_carry_len = 0;
2713
2714 flow_log(" src sg: %p\n", req->src);
2715 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2716 rctx->src_sg, rctx->src_skip);
2717 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2718 flow_log(" dst sg: %p\n", req->dst);
2719 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2720 rctx->dst_sg, rctx->dst_skip);
2721 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2722 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2723 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2724 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2725
2726 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2727 flow_log(" max_payload infinite");
2728 else
2729 flow_log(" max_payload: %u\n", ctx->max_payload);
2730
2731 if (unlikely(aead_need_fallback(req)))
2732 return aead_do_fallback(req, is_encrypt);
2733
2734
2735
2736
2737
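/*
 * Construct the IV/counter block: the per-context salt (for the ESP
 * variants) followed by the IV supplied with the request.
 */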
2738 if (rctx->iv_ctr_len) {
2739 if (ctx->salt_len)
2740 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2741 ctx->salt, ctx->salt_len);
2742 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2743 req->iv,
2744 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2745 }
2746
2747 rctx->chan_idx = select_channel();
2748 err = handle_aead_req(rctx);
2749 if (err != -EINPROGRESS)
2750
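/* Request was not queued to hardware; clean up resources allocated for it */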
2751 spu_chunk_cleanup(rctx);
2752
2753 return err;
2754 }
2755
2756 static int aead_authenc_setkey(struct crypto_aead *cipher,
2757 const u8 *key, unsigned int keylen)
2758 {
2759 struct spu_hw *spu = &iproc_priv.spu;
2760 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2761 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2762 struct crypto_authenc_keys keys;
2763 int ret;
2764
2765 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2766 keylen);
2767 flow_dump(" key: ", key, keylen);
2768
2769 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2770 if (ret)
2771 goto badkey;
2772
2773 if (keys.enckeylen > MAX_KEY_SIZE ||
2774 keys.authkeylen > MAX_KEY_SIZE)
2775 goto badkey;
2776
2777 ctx->enckeylen = keys.enckeylen;
2778 ctx->authkeylen = keys.authkeylen;
2779
2780 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2781
2782 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2783 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2784
2785 switch (ctx->alg->cipher_info.alg) {
2786 case CIPHER_ALG_DES:
2787 if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2788 return -EINVAL;
2789
2790 ctx->cipher_type = CIPHER_TYPE_DES;
2791 break;
2792 case CIPHER_ALG_3DES:
2793 if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2794 return -EINVAL;
2795
2796 ctx->cipher_type = CIPHER_TYPE_3DES;
2797 break;
2798 case CIPHER_ALG_AES:
2799 switch (ctx->enckeylen) {
2800 case AES_KEYSIZE_128:
2801 ctx->cipher_type = CIPHER_TYPE_AES128;
2802 break;
2803 case AES_KEYSIZE_192:
2804 ctx->cipher_type = CIPHER_TYPE_AES192;
2805 break;
2806 case AES_KEYSIZE_256:
2807 ctx->cipher_type = CIPHER_TYPE_AES256;
2808 break;
2809 default:
2810 goto badkey;
2811 }
2812 break;
2813 default:
2814 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2815 return -EINVAL;
2816 }
2817
2818 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2819 ctx->authkeylen);
2820 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2821 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2822
2823
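/* If a software fallback is allocated, program it with the same key */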
2824 if (ctx->fallback_cipher) {
2825 flow_log(" running fallback setkey()\n");
2826
2827 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2828 ctx->fallback_cipher->base.crt_flags |=
2829 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2830 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2831 if (ret)
2832 flow_log(" fallback setkey() returned:%d\n", ret);
2833 }
2834
2835 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2836 ctx->enckeylen,
2837 false);
2838
2839 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2840
2841 return ret;
2842
2843 badkey:
2844 ctx->enckeylen = 0;
2845 ctx->authkeylen = 0;
2846 ctx->digestsize = 0;
2847
2848 return -EINVAL;
2849 }
2850
2851 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2852 const u8 *key, unsigned int keylen)
2853 {
2854 struct spu_hw *spu = &iproc_priv.spu;
2855 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2856 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2857
2858 int ret = 0;
2859
2860 flow_log("%s() keylen:%u\n", __func__, keylen);
2861 flow_dump(" key: ", key, keylen);
2862
2863 if (!ctx->is_esp)
2864 ctx->digestsize = keylen;
2865
2866 ctx->enckeylen = keylen;
2867 ctx->authkeylen = 0;
2868
2869 switch (ctx->enckeylen) {
2870 case AES_KEYSIZE_128:
2871 ctx->cipher_type = CIPHER_TYPE_AES128;
2872 break;
2873 case AES_KEYSIZE_192:
2874 ctx->cipher_type = CIPHER_TYPE_AES192;
2875 break;
2876 case AES_KEYSIZE_256:
2877 ctx->cipher_type = CIPHER_TYPE_AES256;
2878 break;
2879 default:
2880 goto badkey;
2881 }
2882
2883 memcpy(ctx->enckey, key, ctx->enckeylen);
2884
2885 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2886 ctx->authkeylen);
2887 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2888 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2889
2890
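/*
 * Program the fallback as well; for the ESP variants the fallback expects
 * the salt appended to the key, hence keylen + ctx->salt_len below.
 */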
2891 if (ctx->fallback_cipher) {
2892 flow_log(" running fallback setkey()\n");
2893
2894 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2895 ctx->fallback_cipher->base.crt_flags |=
2896 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2897 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2898 keylen + ctx->salt_len);
2899 if (ret)
2900 flow_log(" fallback setkey() returned:%d\n", ret);
2901 }
2902
2903 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2904 ctx->enckeylen,
2905 false);
2906
2907 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2908
2909 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2910 ctx->authkeylen);
2911
2912 return ret;
2913
2914 badkey:
2915 ctx->enckeylen = 0;
2916 ctx->authkeylen = 0;
2917 ctx->digestsize = 0;
2918
2919 return -EINVAL;
2920 }
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
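/*
 * aead_gcm_esp_setkey() - setkey operation for the ESP (RFC4106) variant of
 * GCM(AES). The trailing GCM_ESP_SALT_SIZE bytes of the key are the salt,
 * which is stripped off and saved so it can be prepended to the IV on each
 * request.
 */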
2933 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2934 const u8 *key, unsigned int keylen)
2935 {
2936 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2937
2938 flow_log("%s\n", __func__);
2939
2940 if (keylen < GCM_ESP_SALT_SIZE)
2941 return -EINVAL;
2942
2943 ctx->salt_len = GCM_ESP_SALT_SIZE;
2944 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2945 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2946 keylen -= GCM_ESP_SALT_SIZE;
2947 ctx->digestsize = GCM_ESP_DIGESTSIZE;
2948 ctx->is_esp = true;
2949 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2950
2951 return aead_gcm_ccm_setkey(cipher, key, keylen);
2952 }
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
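/*
 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 (GMAC) over ESP.
 * As for RFC4106, the trailing GCM_ESP_SALT_SIZE bytes of the key are the
 * salt; the is_rfc4543 flag additionally marks the request as GMAC
 * (authentication only).
 */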
2965 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
2966 const u8 *key, unsigned int keylen)
2967 {
2968 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2969
2970 flow_log("%s\n", __func__);
2971
2972 if (keylen < GCM_ESP_SALT_SIZE)
2973 return -EINVAL;
2974
2975 ctx->salt_len = GCM_ESP_SALT_SIZE;
2976 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2977 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2978 keylen -= GCM_ESP_SALT_SIZE;
2979 ctx->digestsize = GCM_ESP_DIGESTSIZE;
2980 ctx->is_esp = true;
2981 ctx->is_rfc4543 = true;
2982 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2983
2984 return aead_gcm_ccm_setkey(cipher, key, keylen);
2985 }
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
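/*
 * aead_ccm_esp_setkey() - setkey operation for the ESP (RFC4309) variant of
 * CCM(AES). The trailing CCM_ESP_SALT_SIZE bytes of the key are the salt,
 * saved for use in building the CCM nonce on each request.
 */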
2998 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
2999 const u8 *key, unsigned int keylen)
3000 {
3001 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3002
3003 flow_log("%s\n", __func__);
3004
3005 if (keylen < CCM_ESP_SALT_SIZE)
3006 return -EINVAL;
3007
3008 ctx->salt_len = CCM_ESP_SALT_SIZE;
3009 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3010 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3011 keylen -= CCM_ESP_SALT_SIZE;
3012 ctx->is_esp = true;
3013 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3014
3015 return aead_gcm_ccm_setkey(cipher, key, keylen);
3016 }
3017
3018 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3019 {
3020 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3021 int ret = 0;
3022
3023 flow_log("%s() authkeylen:%u authsize:%u\n",
3024 __func__, ctx->authkeylen, authsize);
3025
3026 ctx->digestsize = authsize;
3027
3028
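/* Propagate the authentication tag size to the fallback as well */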
3029 if (ctx->fallback_cipher) {
3030 flow_log(" running fallback setauth()\n");
3031
3032 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3033 if (ret)
3034 flow_log(" fallback setauth() returned:%d\n", ret);
3035 }
3036
3037 return ret;
3038 }
3039
3040 static int aead_encrypt(struct aead_request *req)
3041 {
3042 flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3043 req->cryptlen);
3044 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3045 flow_log(" assoc_len:%u\n", req->assoclen);
3046
3047 return aead_enqueue(req, true);
3048 }
3049
3050 static int aead_decrypt(struct aead_request *req)
3051 {
3052 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3053 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3054 flow_log(" assoc_len:%u\n", req->assoclen);
3055
3056 return aead_enqueue(req, false);
3057 }
3058
3059
3060
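/* Table of algorithms offered to the crypto API; registered at probe time by spu_algs_register() */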
3061 static struct iproc_alg_s driver_algs[] = {
3062 {
3063 .type = CRYPTO_ALG_TYPE_AEAD,
3064 .alg.aead = {
3065 .base = {
3066 .cra_name = "gcm(aes)",
3067 .cra_driver_name = "gcm-aes-iproc",
3068 .cra_blocksize = AES_BLOCK_SIZE,
3069 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3070 },
3071 .setkey = aead_gcm_ccm_setkey,
3072 .ivsize = GCM_AES_IV_SIZE,
3073 .maxauthsize = AES_BLOCK_SIZE,
3074 },
3075 .cipher_info = {
3076 .alg = CIPHER_ALG_AES,
3077 .mode = CIPHER_MODE_GCM,
3078 },
3079 .auth_info = {
3080 .alg = HASH_ALG_AES,
3081 .mode = HASH_MODE_GCM,
3082 },
3083 .auth_first = 0,
3084 },
3085 {
3086 .type = CRYPTO_ALG_TYPE_AEAD,
3087 .alg.aead = {
3088 .base = {
3089 .cra_name = "ccm(aes)",
3090 .cra_driver_name = "ccm-aes-iproc",
3091 .cra_blocksize = AES_BLOCK_SIZE,
3092 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3093 },
3094 .setkey = aead_gcm_ccm_setkey,
3095 .ivsize = CCM_AES_IV_SIZE,
3096 .maxauthsize = AES_BLOCK_SIZE,
3097 },
3098 .cipher_info = {
3099 .alg = CIPHER_ALG_AES,
3100 .mode = CIPHER_MODE_CCM,
3101 },
3102 .auth_info = {
3103 .alg = HASH_ALG_AES,
3104 .mode = HASH_MODE_CCM,
3105 },
3106 .auth_first = 0,
3107 },
3108 {
3109 .type = CRYPTO_ALG_TYPE_AEAD,
3110 .alg.aead = {
3111 .base = {
3112 .cra_name = "rfc4106(gcm(aes))",
3113 .cra_driver_name = "gcm-aes-esp-iproc",
3114 .cra_blocksize = AES_BLOCK_SIZE,
3115 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3116 },
3117 .setkey = aead_gcm_esp_setkey,
3118 .ivsize = GCM_RFC4106_IV_SIZE,
3119 .maxauthsize = AES_BLOCK_SIZE,
3120 },
3121 .cipher_info = {
3122 .alg = CIPHER_ALG_AES,
3123 .mode = CIPHER_MODE_GCM,
3124 },
3125 .auth_info = {
3126 .alg = HASH_ALG_AES,
3127 .mode = HASH_MODE_GCM,
3128 },
3129 .auth_first = 0,
3130 },
3131 {
3132 .type = CRYPTO_ALG_TYPE_AEAD,
3133 .alg.aead = {
3134 .base = {
3135 .cra_name = "rfc4309(ccm(aes))",
3136 .cra_driver_name = "ccm-aes-esp-iproc",
3137 .cra_blocksize = AES_BLOCK_SIZE,
3138 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3139 },
3140 .setkey = aead_ccm_esp_setkey,
3141 .ivsize = CCM_AES_IV_SIZE,
3142 .maxauthsize = AES_BLOCK_SIZE,
3143 },
3144 .cipher_info = {
3145 .alg = CIPHER_ALG_AES,
3146 .mode = CIPHER_MODE_CCM,
3147 },
3148 .auth_info = {
3149 .alg = HASH_ALG_AES,
3150 .mode = HASH_MODE_CCM,
3151 },
3152 .auth_first = 0,
3153 },
3154 {
3155 .type = CRYPTO_ALG_TYPE_AEAD,
3156 .alg.aead = {
3157 .base = {
3158 .cra_name = "rfc4543(gcm(aes))",
3159 .cra_driver_name = "gmac-aes-esp-iproc",
3160 .cra_blocksize = AES_BLOCK_SIZE,
3161 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3162 },
3163 .setkey = rfc4543_gcm_esp_setkey,
3164 .ivsize = GCM_RFC4106_IV_SIZE,
3165 .maxauthsize = AES_BLOCK_SIZE,
3166 },
3167 .cipher_info = {
3168 .alg = CIPHER_ALG_AES,
3169 .mode = CIPHER_MODE_GCM,
3170 },
3171 .auth_info = {
3172 .alg = HASH_ALG_AES,
3173 .mode = HASH_MODE_GCM,
3174 },
3175 .auth_first = 0,
3176 },
3177 {
3178 .type = CRYPTO_ALG_TYPE_AEAD,
3179 .alg.aead = {
3180 .base = {
3181 .cra_name = "authenc(hmac(md5),cbc(aes))",
3182 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3183 .cra_blocksize = AES_BLOCK_SIZE,
3184 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3185 CRYPTO_ALG_ASYNC |
3186 CRYPTO_ALG_ALLOCATES_MEMORY
3187 },
3188 .setkey = aead_authenc_setkey,
3189 .ivsize = AES_BLOCK_SIZE,
3190 .maxauthsize = MD5_DIGEST_SIZE,
3191 },
3192 .cipher_info = {
3193 .alg = CIPHER_ALG_AES,
3194 .mode = CIPHER_MODE_CBC,
3195 },
3196 .auth_info = {
3197 .alg = HASH_ALG_MD5,
3198 .mode = HASH_MODE_HMAC,
3199 },
3200 .auth_first = 0,
3201 },
3202 {
3203 .type = CRYPTO_ALG_TYPE_AEAD,
3204 .alg.aead = {
3205 .base = {
3206 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3207 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3208 .cra_blocksize = AES_BLOCK_SIZE,
3209 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3210 CRYPTO_ALG_ASYNC |
3211 CRYPTO_ALG_ALLOCATES_MEMORY
3212 },
3213 .setkey = aead_authenc_setkey,
3214 .ivsize = AES_BLOCK_SIZE,
3215 .maxauthsize = SHA1_DIGEST_SIZE,
3216 },
3217 .cipher_info = {
3218 .alg = CIPHER_ALG_AES,
3219 .mode = CIPHER_MODE_CBC,
3220 },
3221 .auth_info = {
3222 .alg = HASH_ALG_SHA1,
3223 .mode = HASH_MODE_HMAC,
3224 },
3225 .auth_first = 0,
3226 },
3227 {
3228 .type = CRYPTO_ALG_TYPE_AEAD,
3229 .alg.aead = {
3230 .base = {
3231 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3232 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3233 .cra_blocksize = AES_BLOCK_SIZE,
3234 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3235 CRYPTO_ALG_ASYNC |
3236 CRYPTO_ALG_ALLOCATES_MEMORY
3237 },
3238 .setkey = aead_authenc_setkey,
3239 .ivsize = AES_BLOCK_SIZE,
3240 .maxauthsize = SHA256_DIGEST_SIZE,
3241 },
3242 .cipher_info = {
3243 .alg = CIPHER_ALG_AES,
3244 .mode = CIPHER_MODE_CBC,
3245 },
3246 .auth_info = {
3247 .alg = HASH_ALG_SHA256,
3248 .mode = HASH_MODE_HMAC,
3249 },
3250 .auth_first = 0,
3251 },
3252 {
3253 .type = CRYPTO_ALG_TYPE_AEAD,
3254 .alg.aead = {
3255 .base = {
3256 .cra_name = "authenc(hmac(md5),cbc(des))",
3257 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3258 .cra_blocksize = DES_BLOCK_SIZE,
3259 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3260 CRYPTO_ALG_ASYNC |
3261 CRYPTO_ALG_ALLOCATES_MEMORY
3262 },
3263 .setkey = aead_authenc_setkey,
3264 .ivsize = DES_BLOCK_SIZE,
3265 .maxauthsize = MD5_DIGEST_SIZE,
3266 },
3267 .cipher_info = {
3268 .alg = CIPHER_ALG_DES,
3269 .mode = CIPHER_MODE_CBC,
3270 },
3271 .auth_info = {
3272 .alg = HASH_ALG_MD5,
3273 .mode = HASH_MODE_HMAC,
3274 },
3275 .auth_first = 0,
3276 },
3277 {
3278 .type = CRYPTO_ALG_TYPE_AEAD,
3279 .alg.aead = {
3280 .base = {
3281 .cra_name = "authenc(hmac(sha1),cbc(des))",
3282 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3283 .cra_blocksize = DES_BLOCK_SIZE,
3284 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3285 CRYPTO_ALG_ASYNC |
3286 CRYPTO_ALG_ALLOCATES_MEMORY
3287 },
3288 .setkey = aead_authenc_setkey,
3289 .ivsize = DES_BLOCK_SIZE,
3290 .maxauthsize = SHA1_DIGEST_SIZE,
3291 },
3292 .cipher_info = {
3293 .alg = CIPHER_ALG_DES,
3294 .mode = CIPHER_MODE_CBC,
3295 },
3296 .auth_info = {
3297 .alg = HASH_ALG_SHA1,
3298 .mode = HASH_MODE_HMAC,
3299 },
3300 .auth_first = 0,
3301 },
3302 {
3303 .type = CRYPTO_ALG_TYPE_AEAD,
3304 .alg.aead = {
3305 .base = {
3306 .cra_name = "authenc(hmac(sha224),cbc(des))",
3307 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3308 .cra_blocksize = DES_BLOCK_SIZE,
3309 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3310 CRYPTO_ALG_ASYNC |
3311 CRYPTO_ALG_ALLOCATES_MEMORY
3312 },
3313 .setkey = aead_authenc_setkey,
3314 .ivsize = DES_BLOCK_SIZE,
3315 .maxauthsize = SHA224_DIGEST_SIZE,
3316 },
3317 .cipher_info = {
3318 .alg = CIPHER_ALG_DES,
3319 .mode = CIPHER_MODE_CBC,
3320 },
3321 .auth_info = {
3322 .alg = HASH_ALG_SHA224,
3323 .mode = HASH_MODE_HMAC,
3324 },
3325 .auth_first = 0,
3326 },
3327 {
3328 .type = CRYPTO_ALG_TYPE_AEAD,
3329 .alg.aead = {
3330 .base = {
3331 .cra_name = "authenc(hmac(sha256),cbc(des))",
3332 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3333 .cra_blocksize = DES_BLOCK_SIZE,
3334 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3335 CRYPTO_ALG_ASYNC |
3336 CRYPTO_ALG_ALLOCATES_MEMORY
3337 },
3338 .setkey = aead_authenc_setkey,
3339 .ivsize = DES_BLOCK_SIZE,
3340 .maxauthsize = SHA256_DIGEST_SIZE,
3341 },
3342 .cipher_info = {
3343 .alg = CIPHER_ALG_DES,
3344 .mode = CIPHER_MODE_CBC,
3345 },
3346 .auth_info = {
3347 .alg = HASH_ALG_SHA256,
3348 .mode = HASH_MODE_HMAC,
3349 },
3350 .auth_first = 0,
3351 },
3352 {
3353 .type = CRYPTO_ALG_TYPE_AEAD,
3354 .alg.aead = {
3355 .base = {
3356 .cra_name = "authenc(hmac(sha384),cbc(des))",
3357 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3358 .cra_blocksize = DES_BLOCK_SIZE,
3359 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3360 CRYPTO_ALG_ASYNC |
3361 CRYPTO_ALG_ALLOCATES_MEMORY
3362 },
3363 .setkey = aead_authenc_setkey,
3364 .ivsize = DES_BLOCK_SIZE,
3365 .maxauthsize = SHA384_DIGEST_SIZE,
3366 },
3367 .cipher_info = {
3368 .alg = CIPHER_ALG_DES,
3369 .mode = CIPHER_MODE_CBC,
3370 },
3371 .auth_info = {
3372 .alg = HASH_ALG_SHA384,
3373 .mode = HASH_MODE_HMAC,
3374 },
3375 .auth_first = 0,
3376 },
3377 {
3378 .type = CRYPTO_ALG_TYPE_AEAD,
3379 .alg.aead = {
3380 .base = {
3381 .cra_name = "authenc(hmac(sha512),cbc(des))",
3382 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3383 .cra_blocksize = DES_BLOCK_SIZE,
3384 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3385 CRYPTO_ALG_ASYNC |
3386 CRYPTO_ALG_ALLOCATES_MEMORY
3387 },
3388 .setkey = aead_authenc_setkey,
3389 .ivsize = DES_BLOCK_SIZE,
3390 .maxauthsize = SHA512_DIGEST_SIZE,
3391 },
3392 .cipher_info = {
3393 .alg = CIPHER_ALG_DES,
3394 .mode = CIPHER_MODE_CBC,
3395 },
3396 .auth_info = {
3397 .alg = HASH_ALG_SHA512,
3398 .mode = HASH_MODE_HMAC,
3399 },
3400 .auth_first = 0,
3401 },
3402 {
3403 .type = CRYPTO_ALG_TYPE_AEAD,
3404 .alg.aead = {
3405 .base = {
3406 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3407 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3408 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3409 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3410 CRYPTO_ALG_ASYNC |
3411 CRYPTO_ALG_ALLOCATES_MEMORY
3412 },
3413 .setkey = aead_authenc_setkey,
3414 .ivsize = DES3_EDE_BLOCK_SIZE,
3415 .maxauthsize = MD5_DIGEST_SIZE,
3416 },
3417 .cipher_info = {
3418 .alg = CIPHER_ALG_3DES,
3419 .mode = CIPHER_MODE_CBC,
3420 },
3421 .auth_info = {
3422 .alg = HASH_ALG_MD5,
3423 .mode = HASH_MODE_HMAC,
3424 },
3425 .auth_first = 0,
3426 },
3427 {
3428 .type = CRYPTO_ALG_TYPE_AEAD,
3429 .alg.aead = {
3430 .base = {
3431 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3432 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3433 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3434 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3435 CRYPTO_ALG_ASYNC |
3436 CRYPTO_ALG_ALLOCATES_MEMORY
3437 },
3438 .setkey = aead_authenc_setkey,
3439 .ivsize = DES3_EDE_BLOCK_SIZE,
3440 .maxauthsize = SHA1_DIGEST_SIZE,
3441 },
3442 .cipher_info = {
3443 .alg = CIPHER_ALG_3DES,
3444 .mode = CIPHER_MODE_CBC,
3445 },
3446 .auth_info = {
3447 .alg = HASH_ALG_SHA1,
3448 .mode = HASH_MODE_HMAC,
3449 },
3450 .auth_first = 0,
3451 },
3452 {
3453 .type = CRYPTO_ALG_TYPE_AEAD,
3454 .alg.aead = {
3455 .base = {
3456 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3457 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3458 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3459 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3460 CRYPTO_ALG_ASYNC |
3461 CRYPTO_ALG_ALLOCATES_MEMORY
3462 },
3463 .setkey = aead_authenc_setkey,
3464 .ivsize = DES3_EDE_BLOCK_SIZE,
3465 .maxauthsize = SHA224_DIGEST_SIZE,
3466 },
3467 .cipher_info = {
3468 .alg = CIPHER_ALG_3DES,
3469 .mode = CIPHER_MODE_CBC,
3470 },
3471 .auth_info = {
3472 .alg = HASH_ALG_SHA224,
3473 .mode = HASH_MODE_HMAC,
3474 },
3475 .auth_first = 0,
3476 },
3477 {
3478 .type = CRYPTO_ALG_TYPE_AEAD,
3479 .alg.aead = {
3480 .base = {
3481 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3482 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3483 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3484 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3485 CRYPTO_ALG_ASYNC |
3486 CRYPTO_ALG_ALLOCATES_MEMORY
3487 },
3488 .setkey = aead_authenc_setkey,
3489 .ivsize = DES3_EDE_BLOCK_SIZE,
3490 .maxauthsize = SHA256_DIGEST_SIZE,
3491 },
3492 .cipher_info = {
3493 .alg = CIPHER_ALG_3DES,
3494 .mode = CIPHER_MODE_CBC,
3495 },
3496 .auth_info = {
3497 .alg = HASH_ALG_SHA256,
3498 .mode = HASH_MODE_HMAC,
3499 },
3500 .auth_first = 0,
3501 },
3502 {
3503 .type = CRYPTO_ALG_TYPE_AEAD,
3504 .alg.aead = {
3505 .base = {
3506 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3507 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3508 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3509 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3510 CRYPTO_ALG_ASYNC |
3511 CRYPTO_ALG_ALLOCATES_MEMORY
3512 },
3513 .setkey = aead_authenc_setkey,
3514 .ivsize = DES3_EDE_BLOCK_SIZE,
3515 .maxauthsize = SHA384_DIGEST_SIZE,
3516 },
3517 .cipher_info = {
3518 .alg = CIPHER_ALG_3DES,
3519 .mode = CIPHER_MODE_CBC,
3520 },
3521 .auth_info = {
3522 .alg = HASH_ALG_SHA384,
3523 .mode = HASH_MODE_HMAC,
3524 },
3525 .auth_first = 0,
3526 },
3527 {
3528 .type = CRYPTO_ALG_TYPE_AEAD,
3529 .alg.aead = {
3530 .base = {
3531 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3532 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3533 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3534 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3535 CRYPTO_ALG_ASYNC |
3536 CRYPTO_ALG_ALLOCATES_MEMORY
3537 },
3538 .setkey = aead_authenc_setkey,
3539 .ivsize = DES3_EDE_BLOCK_SIZE,
3540 .maxauthsize = SHA512_DIGEST_SIZE,
3541 },
3542 .cipher_info = {
3543 .alg = CIPHER_ALG_3DES,
3544 .mode = CIPHER_MODE_CBC,
3545 },
3546 .auth_info = {
3547 .alg = HASH_ALG_SHA512,
3548 .mode = HASH_MODE_HMAC,
3549 },
3550 .auth_first = 0,
3551 },
3552
3553
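/* SKCIPHER algorithms */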
3554 {
3555 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3556 .alg.skcipher = {
3557 .base.cra_name = "ofb(des)",
3558 .base.cra_driver_name = "ofb-des-iproc",
3559 .base.cra_blocksize = DES_BLOCK_SIZE,
3560 .min_keysize = DES_KEY_SIZE,
3561 .max_keysize = DES_KEY_SIZE,
3562 .ivsize = DES_BLOCK_SIZE,
3563 },
3564 .cipher_info = {
3565 .alg = CIPHER_ALG_DES,
3566 .mode = CIPHER_MODE_OFB,
3567 },
3568 .auth_info = {
3569 .alg = HASH_ALG_NONE,
3570 .mode = HASH_MODE_NONE,
3571 },
3572 },
3573 {
3574 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3575 .alg.skcipher = {
3576 .base.cra_name = "cbc(des)",
3577 .base.cra_driver_name = "cbc-des-iproc",
3578 .base.cra_blocksize = DES_BLOCK_SIZE,
3579 .min_keysize = DES_KEY_SIZE,
3580 .max_keysize = DES_KEY_SIZE,
3581 .ivsize = DES_BLOCK_SIZE,
3582 },
3583 .cipher_info = {
3584 .alg = CIPHER_ALG_DES,
3585 .mode = CIPHER_MODE_CBC,
3586 },
3587 .auth_info = {
3588 .alg = HASH_ALG_NONE,
3589 .mode = HASH_MODE_NONE,
3590 },
3591 },
3592 {
3593 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3594 .alg.skcipher = {
3595 .base.cra_name = "ecb(des)",
3596 .base.cra_driver_name = "ecb-des-iproc",
3597 .base.cra_blocksize = DES_BLOCK_SIZE,
3598 .min_keysize = DES_KEY_SIZE,
3599 .max_keysize = DES_KEY_SIZE,
3600 .ivsize = 0,
3601 },
3602 .cipher_info = {
3603 .alg = CIPHER_ALG_DES,
3604 .mode = CIPHER_MODE_ECB,
3605 },
3606 .auth_info = {
3607 .alg = HASH_ALG_NONE,
3608 .mode = HASH_MODE_NONE,
3609 },
3610 },
3611 {
3612 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3613 .alg.skcipher = {
3614 .base.cra_name = "ofb(des3_ede)",
3615 .base.cra_driver_name = "ofb-des3-iproc",
3616 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3617 .min_keysize = DES3_EDE_KEY_SIZE,
3618 .max_keysize = DES3_EDE_KEY_SIZE,
3619 .ivsize = DES3_EDE_BLOCK_SIZE,
3620 },
3621 .cipher_info = {
3622 .alg = CIPHER_ALG_3DES,
3623 .mode = CIPHER_MODE_OFB,
3624 },
3625 .auth_info = {
3626 .alg = HASH_ALG_NONE,
3627 .mode = HASH_MODE_NONE,
3628 },
3629 },
3630 {
3631 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3632 .alg.skcipher = {
3633 .base.cra_name = "cbc(des3_ede)",
3634 .base.cra_driver_name = "cbc-des3-iproc",
3635 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3636 .min_keysize = DES3_EDE_KEY_SIZE,
3637 .max_keysize = DES3_EDE_KEY_SIZE,
3638 .ivsize = DES3_EDE_BLOCK_SIZE,
3639 },
3640 .cipher_info = {
3641 .alg = CIPHER_ALG_3DES,
3642 .mode = CIPHER_MODE_CBC,
3643 },
3644 .auth_info = {
3645 .alg = HASH_ALG_NONE,
3646 .mode = HASH_MODE_NONE,
3647 },
3648 },
3649 {
3650 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3651 .alg.skcipher = {
3652 .base.cra_name = "ecb(des3_ede)",
3653 .base.cra_driver_name = "ecb-des3-iproc",
3654 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3655 .min_keysize = DES3_EDE_KEY_SIZE,
3656 .max_keysize = DES3_EDE_KEY_SIZE,
3657 .ivsize = 0,
3658 },
3659 .cipher_info = {
3660 .alg = CIPHER_ALG_3DES,
3661 .mode = CIPHER_MODE_ECB,
3662 },
3663 .auth_info = {
3664 .alg = HASH_ALG_NONE,
3665 .mode = HASH_MODE_NONE,
3666 },
3667 },
3668 {
3669 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3670 .alg.skcipher = {
3671 .base.cra_name = "ofb(aes)",
3672 .base.cra_driver_name = "ofb-aes-iproc",
3673 .base.cra_blocksize = AES_BLOCK_SIZE,
3674 .min_keysize = AES_MIN_KEY_SIZE,
3675 .max_keysize = AES_MAX_KEY_SIZE,
3676 .ivsize = AES_BLOCK_SIZE,
3677 },
3678 .cipher_info = {
3679 .alg = CIPHER_ALG_AES,
3680 .mode = CIPHER_MODE_OFB,
3681 },
3682 .auth_info = {
3683 .alg = HASH_ALG_NONE,
3684 .mode = HASH_MODE_NONE,
3685 },
3686 },
3687 {
3688 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3689 .alg.skcipher = {
3690 .base.cra_name = "cbc(aes)",
3691 .base.cra_driver_name = "cbc-aes-iproc",
3692 .base.cra_blocksize = AES_BLOCK_SIZE,
3693 .min_keysize = AES_MIN_KEY_SIZE,
3694 .max_keysize = AES_MAX_KEY_SIZE,
3695 .ivsize = AES_BLOCK_SIZE,
3696 },
3697 .cipher_info = {
3698 .alg = CIPHER_ALG_AES,
3699 .mode = CIPHER_MODE_CBC,
3700 },
3701 .auth_info = {
3702 .alg = HASH_ALG_NONE,
3703 .mode = HASH_MODE_NONE,
3704 },
3705 },
3706 {
3707 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3708 .alg.skcipher = {
3709 .base.cra_name = "ecb(aes)",
3710 .base.cra_driver_name = "ecb-aes-iproc",
3711 .base.cra_blocksize = AES_BLOCK_SIZE,
3712 .min_keysize = AES_MIN_KEY_SIZE,
3713 .max_keysize = AES_MAX_KEY_SIZE,
3714 .ivsize = 0,
3715 },
3716 .cipher_info = {
3717 .alg = CIPHER_ALG_AES,
3718 .mode = CIPHER_MODE_ECB,
3719 },
3720 .auth_info = {
3721 .alg = HASH_ALG_NONE,
3722 .mode = HASH_MODE_NONE,
3723 },
3724 },
3725 {
3726 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3727 .alg.skcipher = {
3728 .base.cra_name = "ctr(aes)",
3729 .base.cra_driver_name = "ctr-aes-iproc",
3730 .base.cra_blocksize = AES_BLOCK_SIZE,
3731 .min_keysize = AES_MIN_KEY_SIZE,
3732 .max_keysize = AES_MAX_KEY_SIZE,
3733 .ivsize = AES_BLOCK_SIZE,
3734 },
3735 .cipher_info = {
3736 .alg = CIPHER_ALG_AES,
3737 .mode = CIPHER_MODE_CTR,
3738 },
3739 .auth_info = {
3740 .alg = HASH_ALG_NONE,
3741 .mode = HASH_MODE_NONE,
3742 },
3743 },
3744 {
3745 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3746 .alg.skcipher = {
3747 .base.cra_name = "xts(aes)",
3748 .base.cra_driver_name = "xts-aes-iproc",
3749 .base.cra_blocksize = AES_BLOCK_SIZE,
3750 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3751 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3752 .ivsize = AES_BLOCK_SIZE,
3753 },
3754 .cipher_info = {
3755 .alg = CIPHER_ALG_AES,
3756 .mode = CIPHER_MODE_XTS,
3757 },
3758 .auth_info = {
3759 .alg = HASH_ALG_NONE,
3760 .mode = HASH_MODE_NONE,
3761 },
3762 },
3763
3764
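/* AHASH algorithms */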
3765 {
3766 .type = CRYPTO_ALG_TYPE_AHASH,
3767 .alg.hash = {
3768 .halg.digestsize = MD5_DIGEST_SIZE,
3769 .halg.base = {
3770 .cra_name = "md5",
3771 .cra_driver_name = "md5-iproc",
3772 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3773 .cra_flags = CRYPTO_ALG_ASYNC |
3774 CRYPTO_ALG_ALLOCATES_MEMORY,
3775 }
3776 },
3777 .cipher_info = {
3778 .alg = CIPHER_ALG_NONE,
3779 .mode = CIPHER_MODE_NONE,
3780 },
3781 .auth_info = {
3782 .alg = HASH_ALG_MD5,
3783 .mode = HASH_MODE_HASH,
3784 },
3785 },
3786 {
3787 .type = CRYPTO_ALG_TYPE_AHASH,
3788 .alg.hash = {
3789 .halg.digestsize = MD5_DIGEST_SIZE,
3790 .halg.base = {
3791 .cra_name = "hmac(md5)",
3792 .cra_driver_name = "hmac-md5-iproc",
3793 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3794 }
3795 },
3796 .cipher_info = {
3797 .alg = CIPHER_ALG_NONE,
3798 .mode = CIPHER_MODE_NONE,
3799 },
3800 .auth_info = {
3801 .alg = HASH_ALG_MD5,
3802 .mode = HASH_MODE_HMAC,
3803 },
3804 },
3805 {.type = CRYPTO_ALG_TYPE_AHASH,
3806 .alg.hash = {
3807 .halg.digestsize = SHA1_DIGEST_SIZE,
3808 .halg.base = {
3809 .cra_name = "sha1",
3810 .cra_driver_name = "sha1-iproc",
3811 .cra_blocksize = SHA1_BLOCK_SIZE,
3812 }
3813 },
3814 .cipher_info = {
3815 .alg = CIPHER_ALG_NONE,
3816 .mode = CIPHER_MODE_NONE,
3817 },
3818 .auth_info = {
3819 .alg = HASH_ALG_SHA1,
3820 .mode = HASH_MODE_HASH,
3821 },
3822 },
3823 {.type = CRYPTO_ALG_TYPE_AHASH,
3824 .alg.hash = {
3825 .halg.digestsize = SHA1_DIGEST_SIZE,
3826 .halg.base = {
3827 .cra_name = "hmac(sha1)",
3828 .cra_driver_name = "hmac-sha1-iproc",
3829 .cra_blocksize = SHA1_BLOCK_SIZE,
3830 }
3831 },
3832 .cipher_info = {
3833 .alg = CIPHER_ALG_NONE,
3834 .mode = CIPHER_MODE_NONE,
3835 },
3836 .auth_info = {
3837 .alg = HASH_ALG_SHA1,
3838 .mode = HASH_MODE_HMAC,
3839 },
3840 },
3841 {.type = CRYPTO_ALG_TYPE_AHASH,
3842 .alg.hash = {
3843 .halg.digestsize = SHA224_DIGEST_SIZE,
3844 .halg.base = {
3845 .cra_name = "sha224",
3846 .cra_driver_name = "sha224-iproc",
3847 .cra_blocksize = SHA224_BLOCK_SIZE,
3848 }
3849 },
3850 .cipher_info = {
3851 .alg = CIPHER_ALG_NONE,
3852 .mode = CIPHER_MODE_NONE,
3853 },
3854 .auth_info = {
3855 .alg = HASH_ALG_SHA224,
3856 .mode = HASH_MODE_HASH,
3857 },
3858 },
3859 {.type = CRYPTO_ALG_TYPE_AHASH,
3860 .alg.hash = {
3861 .halg.digestsize = SHA224_DIGEST_SIZE,
3862 .halg.base = {
3863 .cra_name = "hmac(sha224)",
3864 .cra_driver_name = "hmac-sha224-iproc",
3865 .cra_blocksize = SHA224_BLOCK_SIZE,
3866 }
3867 },
3868 .cipher_info = {
3869 .alg = CIPHER_ALG_NONE,
3870 .mode = CIPHER_MODE_NONE,
3871 },
3872 .auth_info = {
3873 .alg = HASH_ALG_SHA224,
3874 .mode = HASH_MODE_HMAC,
3875 },
3876 },
3877 {.type = CRYPTO_ALG_TYPE_AHASH,
3878 .alg.hash = {
3879 .halg.digestsize = SHA256_DIGEST_SIZE,
3880 .halg.base = {
3881 .cra_name = "sha256",
3882 .cra_driver_name = "sha256-iproc",
3883 .cra_blocksize = SHA256_BLOCK_SIZE,
3884 }
3885 },
3886 .cipher_info = {
3887 .alg = CIPHER_ALG_NONE,
3888 .mode = CIPHER_MODE_NONE,
3889 },
3890 .auth_info = {
3891 .alg = HASH_ALG_SHA256,
3892 .mode = HASH_MODE_HASH,
3893 },
3894 },
3895 {.type = CRYPTO_ALG_TYPE_AHASH,
3896 .alg.hash = {
3897 .halg.digestsize = SHA256_DIGEST_SIZE,
3898 .halg.base = {
3899 .cra_name = "hmac(sha256)",
3900 .cra_driver_name = "hmac-sha256-iproc",
3901 .cra_blocksize = SHA256_BLOCK_SIZE,
3902 }
3903 },
3904 .cipher_info = {
3905 .alg = CIPHER_ALG_NONE,
3906 .mode = CIPHER_MODE_NONE,
3907 },
3908 .auth_info = {
3909 .alg = HASH_ALG_SHA256,
3910 .mode = HASH_MODE_HMAC,
3911 },
3912 },
3913 {
3914 .type = CRYPTO_ALG_TYPE_AHASH,
3915 .alg.hash = {
3916 .halg.digestsize = SHA384_DIGEST_SIZE,
3917 .halg.base = {
3918 .cra_name = "sha384",
3919 .cra_driver_name = "sha384-iproc",
3920 .cra_blocksize = SHA384_BLOCK_SIZE,
3921 }
3922 },
3923 .cipher_info = {
3924 .alg = CIPHER_ALG_NONE,
3925 .mode = CIPHER_MODE_NONE,
3926 },
3927 .auth_info = {
3928 .alg = HASH_ALG_SHA384,
3929 .mode = HASH_MODE_HASH,
3930 },
3931 },
3932 {
3933 .type = CRYPTO_ALG_TYPE_AHASH,
3934 .alg.hash = {
3935 .halg.digestsize = SHA384_DIGEST_SIZE,
3936 .halg.base = {
3937 .cra_name = "hmac(sha384)",
3938 .cra_driver_name = "hmac-sha384-iproc",
3939 .cra_blocksize = SHA384_BLOCK_SIZE,
3940 }
3941 },
3942 .cipher_info = {
3943 .alg = CIPHER_ALG_NONE,
3944 .mode = CIPHER_MODE_NONE,
3945 },
3946 .auth_info = {
3947 .alg = HASH_ALG_SHA384,
3948 .mode = HASH_MODE_HMAC,
3949 },
3950 },
3951 {
3952 .type = CRYPTO_ALG_TYPE_AHASH,
3953 .alg.hash = {
3954 .halg.digestsize = SHA512_DIGEST_SIZE,
3955 .halg.base = {
3956 .cra_name = "sha512",
3957 .cra_driver_name = "sha512-iproc",
3958 .cra_blocksize = SHA512_BLOCK_SIZE,
3959 }
3960 },
3961 .cipher_info = {
3962 .alg = CIPHER_ALG_NONE,
3963 .mode = CIPHER_MODE_NONE,
3964 },
3965 .auth_info = {
3966 .alg = HASH_ALG_SHA512,
3967 .mode = HASH_MODE_HASH,
3968 },
3969 },
3970 {
3971 .type = CRYPTO_ALG_TYPE_AHASH,
3972 .alg.hash = {
3973 .halg.digestsize = SHA512_DIGEST_SIZE,
3974 .halg.base = {
3975 .cra_name = "hmac(sha512)",
3976 .cra_driver_name = "hmac-sha512-iproc",
3977 .cra_blocksize = SHA512_BLOCK_SIZE,
3978 }
3979 },
3980 .cipher_info = {
3981 .alg = CIPHER_ALG_NONE,
3982 .mode = CIPHER_MODE_NONE,
3983 },
3984 .auth_info = {
3985 .alg = HASH_ALG_SHA512,
3986 .mode = HASH_MODE_HMAC,
3987 },
3988 },
3989 {
3990 .type = CRYPTO_ALG_TYPE_AHASH,
3991 .alg.hash = {
3992 .halg.digestsize = SHA3_224_DIGEST_SIZE,
3993 .halg.base = {
3994 .cra_name = "sha3-224",
3995 .cra_driver_name = "sha3-224-iproc",
3996 .cra_blocksize = SHA3_224_BLOCK_SIZE,
3997 }
3998 },
3999 .cipher_info = {
4000 .alg = CIPHER_ALG_NONE,
4001 .mode = CIPHER_MODE_NONE,
4002 },
4003 .auth_info = {
4004 .alg = HASH_ALG_SHA3_224,
4005 .mode = HASH_MODE_HASH,
4006 },
4007 },
4008 {
4009 .type = CRYPTO_ALG_TYPE_AHASH,
4010 .alg.hash = {
4011 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4012 .halg.base = {
4013 .cra_name = "hmac(sha3-224)",
4014 .cra_driver_name = "hmac-sha3-224-iproc",
4015 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4016 }
4017 },
4018 .cipher_info = {
4019 .alg = CIPHER_ALG_NONE,
4020 .mode = CIPHER_MODE_NONE,
4021 },
4022 .auth_info = {
4023 .alg = HASH_ALG_SHA3_224,
4024 .mode = HASH_MODE_HMAC
4025 },
4026 },
4027 {
4028 .type = CRYPTO_ALG_TYPE_AHASH,
4029 .alg.hash = {
4030 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4031 .halg.base = {
4032 .cra_name = "sha3-256",
4033 .cra_driver_name = "sha3-256-iproc",
4034 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4035 }
4036 },
4037 .cipher_info = {
4038 .alg = CIPHER_ALG_NONE,
4039 .mode = CIPHER_MODE_NONE,
4040 },
4041 .auth_info = {
4042 .alg = HASH_ALG_SHA3_256,
4043 .mode = HASH_MODE_HASH,
4044 },
4045 },
4046 {
4047 .type = CRYPTO_ALG_TYPE_AHASH,
4048 .alg.hash = {
4049 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4050 .halg.base = {
4051 .cra_name = "hmac(sha3-256)",
4052 .cra_driver_name = "hmac-sha3-256-iproc",
4053 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4054 }
4055 },
4056 .cipher_info = {
4057 .alg = CIPHER_ALG_NONE,
4058 .mode = CIPHER_MODE_NONE,
4059 },
4060 .auth_info = {
4061 .alg = HASH_ALG_SHA3_256,
4062 .mode = HASH_MODE_HMAC,
4063 },
4064 },
4065 {
4066 .type = CRYPTO_ALG_TYPE_AHASH,
4067 .alg.hash = {
4068 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4069 .halg.base = {
4070 .cra_name = "sha3-384",
4071 .cra_driver_name = "sha3-384-iproc",
4072 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4073 }
4074 },
4075 .cipher_info = {
4076 .alg = CIPHER_ALG_NONE,
4077 .mode = CIPHER_MODE_NONE,
4078 },
4079 .auth_info = {
4080 .alg = HASH_ALG_SHA3_384,
4081 .mode = HASH_MODE_HASH,
4082 },
4083 },
4084 {
4085 .type = CRYPTO_ALG_TYPE_AHASH,
4086 .alg.hash = {
4087 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4088 .halg.base = {
4089 .cra_name = "hmac(sha3-384)",
4090 .cra_driver_name = "hmac-sha3-384-iproc",
4091 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4092 }
4093 },
4094 .cipher_info = {
4095 .alg = CIPHER_ALG_NONE,
4096 .mode = CIPHER_MODE_NONE,
4097 },
4098 .auth_info = {
4099 .alg = HASH_ALG_SHA3_384,
4100 .mode = HASH_MODE_HMAC,
4101 },
4102 },
4103 {
4104 .type = CRYPTO_ALG_TYPE_AHASH,
4105 .alg.hash = {
4106 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4107 .halg.base = {
4108 .cra_name = "sha3-512",
4109 .cra_driver_name = "sha3-512-iproc",
4110 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4111 }
4112 },
4113 .cipher_info = {
4114 .alg = CIPHER_ALG_NONE,
4115 .mode = CIPHER_MODE_NONE,
4116 },
4117 .auth_info = {
4118 .alg = HASH_ALG_SHA3_512,
4119 .mode = HASH_MODE_HASH,
4120 },
4121 },
4122 {
4123 .type = CRYPTO_ALG_TYPE_AHASH,
4124 .alg.hash = {
4125 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4126 .halg.base = {
4127 .cra_name = "hmac(sha3-512)",
4128 .cra_driver_name = "hmac-sha3-512-iproc",
4129 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4130 }
4131 },
4132 .cipher_info = {
4133 .alg = CIPHER_ALG_NONE,
4134 .mode = CIPHER_MODE_NONE,
4135 },
4136 .auth_info = {
4137 .alg = HASH_ALG_SHA3_512,
4138 .mode = HASH_MODE_HMAC,
4139 },
4140 },
4141 {
4142 .type = CRYPTO_ALG_TYPE_AHASH,
4143 .alg.hash = {
4144 .halg.digestsize = AES_BLOCK_SIZE,
4145 .halg.base = {
4146 .cra_name = "xcbc(aes)",
4147 .cra_driver_name = "xcbc-aes-iproc",
4148 .cra_blocksize = AES_BLOCK_SIZE,
4149 }
4150 },
4151 .cipher_info = {
4152 .alg = CIPHER_ALG_NONE,
4153 .mode = CIPHER_MODE_NONE,
4154 },
4155 .auth_info = {
4156 .alg = HASH_ALG_AES,
4157 .mode = HASH_MODE_XCBC,
4158 },
4159 },
4160 {
4161 .type = CRYPTO_ALG_TYPE_AHASH,
4162 .alg.hash = {
4163 .halg.digestsize = AES_BLOCK_SIZE,
4164 .halg.base = {
4165 .cra_name = "cmac(aes)",
4166 .cra_driver_name = "cmac-aes-iproc",
4167 .cra_blocksize = AES_BLOCK_SIZE,
4168 }
4169 },
4170 .cipher_info = {
4171 .alg = CIPHER_ALG_NONE,
4172 .mode = CIPHER_MODE_NONE,
4173 },
4174 .auth_info = {
4175 .alg = HASH_ALG_AES,
4176 .mode = HASH_MODE_CMAC,
4177 },
4178 },
4179 };
4180
4181 static int generic_cra_init(struct crypto_tfm *tfm,
4182 struct iproc_alg_s *cipher_alg)
4183 {
4184 struct spu_hw *spu = &iproc_priv.spu;
4185 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4186 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4187
4188 flow_log("%s()\n", __func__);
4189
4190 ctx->alg = cipher_alg;
4191 ctx->cipher = cipher_alg->cipher_info;
4192 ctx->auth = cipher_alg->auth_info;
4193 ctx->auth_first = cipher_alg->auth_first;
4194 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4195 ctx->cipher.mode,
4196 blocksize);
4197 ctx->fallback_cipher = NULL;
4198
4199 ctx->enckeylen = 0;
4200 ctx->authkeylen = 0;
4201
4202 atomic_inc(&iproc_priv.stream_count);
4203 atomic_inc(&iproc_priv.session_count);
4204
4205 return 0;
4206 }
4207
4208 static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
4209 {
4210 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
4211 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
4212 struct iproc_alg_s *cipher_alg;
4213
4214 flow_log("%s()\n", __func__);
4215
4216 crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4217
4218 cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
4219 return generic_cra_init(tfm, cipher_alg);
4220 }
4221
4222 static int ahash_cra_init(struct crypto_tfm *tfm)
4223 {
4224 int err;
4225 struct crypto_alg *alg = tfm->__crt_alg;
4226 struct iproc_alg_s *cipher_alg;
4227
4228 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4229 alg.hash);
4230
4231 err = generic_cra_init(tfm, cipher_alg);
4232 flow_log("%s()\n", __func__);
4233
4234
4235
4236
4237
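/* Reserve per-request context space for hash requests */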
4238 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4239 sizeof(struct iproc_reqctx_s));
4240
4241 return err;
4242 }
4243
4244 static int aead_cra_init(struct crypto_aead *aead)
4245 {
4246 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4247 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4248 struct crypto_alg *alg = tfm->__crt_alg;
4249 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4250 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4251 alg.aead);
4252
4253 int err = generic_cra_init(tfm, cipher_alg);
4254
4255 flow_log("%s()\n", __func__);
4256
4257 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4258 ctx->is_esp = false;
4259 ctx->salt_len = 0;
4260 ctx->salt_offset = 0;
4261
4262
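/* Seed the context IV with random bytes */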
4263 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4264 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4265
4266 if (!err) {
4267 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4268 flow_log("%s() creating fallback cipher\n", __func__);
4269
4270 ctx->fallback_cipher =
4271 crypto_alloc_aead(alg->cra_name, 0,
4272 CRYPTO_ALG_ASYNC |
4273 CRYPTO_ALG_NEED_FALLBACK);
4274 if (IS_ERR(ctx->fallback_cipher)) {
4275 pr_err("%s() Error: failed to allocate fallback for %s\n",
4276 __func__, alg->cra_name);
4277 return PTR_ERR(ctx->fallback_cipher);
4278 }
4279 }
4280 }
4281
4282 return err;
4283 }
4284
4285 static void generic_cra_exit(struct crypto_tfm *tfm)
4286 {
4287 atomic_dec(&iproc_priv.session_count);
4288 }
4289
4290 static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
4291 {
4292 generic_cra_exit(crypto_skcipher_tfm(tfm));
4293 }
4294
4295 static void aead_cra_exit(struct crypto_aead *aead)
4296 {
4297 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4298 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4299
4300 generic_cra_exit(tfm);
4301
4302 if (ctx->fallback_cipher) {
4303 crypto_free_aead(ctx->fallback_cipher);
4304 ctx->fallback_cipher = NULL;
4305 }
4306 }
4307
4308
4309
4310
4311
4312
4313
4314
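/*
 * spu_functions_register() - Plug in the SPU-M or SPU2 hardware-specific
 * message formatting functions, based on the SPU type and subtype read from
 * the device tree.
 */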
4315 static void spu_functions_register(struct device *dev,
4316 enum spu_spu_type spu_type,
4317 enum spu_spu_subtype spu_subtype)
4318 {
4319 struct spu_hw *spu = &iproc_priv.spu;
4320
4321 if (spu_type == SPU_TYPE_SPUM) {
4322 dev_dbg(dev, "Registering SPUM functions");
4323 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4324 spu->spu_payload_length = spum_payload_length;
4325 spu->spu_response_hdr_len = spum_response_hdr_len;
4326 spu->spu_hash_pad_len = spum_hash_pad_len;
4327 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4328 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4329 spu->spu_aead_ivlen = spum_aead_ivlen;
4330 spu->spu_hash_type = spum_hash_type;
4331 spu->spu_digest_size = spum_digest_size;
4332 spu->spu_create_request = spum_create_request;
4333 spu->spu_cipher_req_init = spum_cipher_req_init;
4334 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4335 spu->spu_request_pad = spum_request_pad;
4336 spu->spu_tx_status_len = spum_tx_status_len;
4337 spu->spu_rx_status_len = spum_rx_status_len;
4338 spu->spu_status_process = spum_status_process;
4339 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4340 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4341 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4342 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4343 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4344 else
4345 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4346 } else {
4347 dev_dbg(dev, "Registering SPU2 functions");
4348 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4349 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4350 spu->spu_payload_length = spu2_payload_length;
4351 spu->spu_response_hdr_len = spu2_response_hdr_len;
4352 spu->spu_hash_pad_len = spu2_hash_pad_len;
4353 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4354 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4355 spu->spu_aead_ivlen = spu2_aead_ivlen;
4356 spu->spu_hash_type = spu2_hash_type;
4357 spu->spu_digest_size = spu2_digest_size;
4358 spu->spu_create_request = spu2_create_request;
4359 spu->spu_cipher_req_init = spu2_cipher_req_init;
4360 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4361 spu->spu_request_pad = spu2_request_pad;
4362 spu->spu_tx_status_len = spu2_tx_status_len;
4363 spu->spu_rx_status_len = spu2_rx_status_len;
4364 spu->spu_status_process = spu2_status_process;
4365 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4366 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4367 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4368 }
4369 }
4370
4371
4372
4373
4374
4375
4376
4377
4378
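/*
 * spu_mb_init() - Allocate and request a mailbox channel for each SPU
 * hardware channel. Returns 0 on success or a negative errno.
 */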
4379 static int spu_mb_init(struct device *dev)
4380 {
4381 struct mbox_client *mcl = &iproc_priv.mcl;
4382 int err, i;
4383
4384 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4385 sizeof(struct mbox_chan *), GFP_KERNEL);
4386 if (!iproc_priv.mbox)
4387 return -ENOMEM;
4388
4389 mcl->dev = dev;
4390 mcl->tx_block = false;
4391 mcl->tx_tout = 0;
4392 mcl->knows_txdone = true;
4393 mcl->rx_callback = spu_rx_callback;
4394 mcl->tx_done = NULL;
4395
4396 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4397 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4398 if (IS_ERR(iproc_priv.mbox[i])) {
4399 err = PTR_ERR(iproc_priv.mbox[i]);
4400 dev_err(dev,
4401 "Mbox channel %d request failed with err %d",
4402 i, err);
4403 iproc_priv.mbox[i] = NULL;
4404 goto free_channels;
4405 }
4406 }
4407
4408 return 0;
4409 free_channels:
4410 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4411 if (iproc_priv.mbox[i])
4412 mbox_free_channel(iproc_priv.mbox[i]);
4413 }
4414
4415 return err;
4416 }
4417
4418 static void spu_mb_release(struct platform_device *pdev)
4419 {
4420 int i;
4421
4422 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4423 mbox_free_channel(iproc_priv.mbox[i]);
4424 }
4425
4426 static void spu_counters_init(void)
4427 {
4428 int i;
4429 int j;
4430
4431 atomic_set(&iproc_priv.session_count, 0);
4432 atomic_set(&iproc_priv.stream_count, 0);
4433 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4434 atomic64_set(&iproc_priv.bytes_in, 0);
4435 atomic64_set(&iproc_priv.bytes_out, 0);
4436 for (i = 0; i < SPU_OP_NUM; i++) {
4437 atomic_set(&iproc_priv.op_counts[i], 0);
4438 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4439 }
4440 for (i = 0; i < CIPHER_ALG_LAST; i++)
4441 for (j = 0; j < CIPHER_MODE_LAST; j++)
4442 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4443
4444 for (i = 0; i < HASH_ALG_LAST; i++) {
4445 atomic_set(&iproc_priv.hash_cnt[i], 0);
4446 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4447 }
4448 for (i = 0; i < AEAD_TYPE_LAST; i++)
4449 atomic_set(&iproc_priv.aead_cnt[i], 0);
4450
4451 atomic_set(&iproc_priv.mb_no_spc, 0);
4452 atomic_set(&iproc_priv.mb_send_fail, 0);
4453 atomic_set(&iproc_priv.bad_icv, 0);
4454 }
4455
4456 static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
4457 {
4458 struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
4459 int err;
4460
4461 crypto->base.cra_module = THIS_MODULE;
4462 crypto->base.cra_priority = cipher_pri;
4463 crypto->base.cra_alignmask = 0;
4464 crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4465 crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
4466 CRYPTO_ALG_ALLOCATES_MEMORY |
4467 CRYPTO_ALG_KERN_DRIVER_ONLY;
4468
4469 crypto->init = skcipher_init_tfm;
4470 crypto->exit = skcipher_exit_tfm;
4471 crypto->setkey = skcipher_setkey;
4472 crypto->encrypt = skcipher_encrypt;
4473 crypto->decrypt = skcipher_decrypt;
4474
4475 err = crypto_register_skcipher(crypto);
4476
4477 if (err == 0)
4478 driver_alg->registered = true;
4479 pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
4480 return err;
4481 }
4482
4483 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4484 {
4485 struct spu_hw *spu = &iproc_priv.spu;
4486 struct ahash_alg *hash = &driver_alg->alg.hash;
4487 int err;
4488
4489
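/* On SPU-M, AES-XCBC is the only AES-based hash type supported */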
4490 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4491 (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4492 (spu->spu_type == SPU_TYPE_SPUM))
4493 return 0;
4494
4495
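/* SHA3 algorithms are only supported on SPU2 version 2 hardware */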
4496 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4497 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4498 return 0;
4499
4500 hash->halg.base.cra_module = THIS_MODULE;
4501 hash->halg.base.cra_priority = hash_pri;
4502 hash->halg.base.cra_alignmask = 0;
4503 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4504 hash->halg.base.cra_init = ahash_cra_init;
4505 hash->halg.base.cra_exit = generic_cra_exit;
4506 hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4507 CRYPTO_ALG_ALLOCATES_MEMORY;
4508 hash->halg.statesize = sizeof(struct spu_hash_export_s);
4509
4510 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4511 hash->init = ahash_init;
4512 hash->update = ahash_update;
4513 hash->final = ahash_final;
4514 hash->finup = ahash_finup;
4515 hash->digest = ahash_digest;
4516 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4517 ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4518 (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4519 hash->setkey = ahash_setkey;
4520 }
4521 } else {
4522 hash->setkey = ahash_hmac_setkey;
4523 hash->init = ahash_hmac_init;
4524 hash->update = ahash_hmac_update;
4525 hash->final = ahash_hmac_final;
4526 hash->finup = ahash_hmac_finup;
4527 hash->digest = ahash_hmac_digest;
4528 }
4529 hash->export = ahash_export;
4530 hash->import = ahash_import;
4531
4532 err = crypto_register_ahash(hash);
4533
4534 if (err == 0)
4535 driver_alg->registered = true;
4536 pr_debug(" registered ahash %s\n",
4537 hash->halg.base.cra_driver_name);
4538 return err;
4539 }
4540
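/*
 * spu_register_aead() - Register one AEAD algorithm, setting the common
 * cra_* fields and the driver's AEAD callbacks.
 */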
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);

	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);

	if (err == 0)
		driver_alg->registered = true;
	pr_debug(" registered aead %s\n", aead->base.cra_driver_name);
	return err;
}

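/*
 * spu_algs_register() - Register every algorithm in driver_algs[] with the
 * crypto API. On failure, unregister whatever was registered so far and
 * return the error.
 */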
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = spu_register_skcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev, "iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++) {
		/* Skip any algorithm that was not actually registered */
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}

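/* ==================== Kernel Platform API ==================== */

/* Map each device-tree compatible string to an SPU hardware type/subtype */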
static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);

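/*
 * spu_dt_read() - Read device-tree properties: count the mailbox channels,
 * look up the SPU type/subtype from the matched compatible string, and
 * ioremap any SPU control register ranges.
 */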
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {

		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(dev, "Failed to map registers: %d\n", err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}

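/*
 * bcm_spu_probe() - Platform probe: read the device tree, initialize the
 * mailbox channels, register the SPU functions for the detected hardware,
 * set up counters and debugfs, then register the crypto algorithms.
 */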
static int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(dev);
	if (err < 0)
		goto failure;

	/* SPU-M requests carry an 8-byte BCM header; SPU2 does not use one */
	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}

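/*
 * bcm_spu_remove() - Platform remove: unregister every algorithm that was
 * actually registered, then tear down debugfs and release the mailbox
 * channels.
 */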
static int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * the hardware is SPU or SPU2. So skip any algorithms that
		 * were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
			dev_dbg(dev, " unregistered cipher %s\n",
				driver_algs[i].alg.skcipher.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, " unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, " unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}

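/* ==================== Kernel Module API ==================== */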
static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");