0001
0002
0003
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <crypto/algapi.h>
0007 #include <crypto/hash.h>
0008 #include <crypto/md5.h>
0009 #include <crypto/sm3.h>
0010 #include <crypto/internal/hash.h>
0011
0012 #include "cc_driver.h"
0013 #include "cc_request_mgr.h"
0014 #include "cc_buffer_mgr.h"
0015 #include "cc_hash.h"
0016 #include "cc_sram_mgr.h"
0017
0018 #define CC_MAX_HASH_SEQ_LEN 12
0019 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
0020 #define CC_SM3_HASH_LEN_SIZE 8
0021
0022 struct cc_hash_handle {
0023 u32 digest_len_sram_addr;
0024 u32 larval_digest_sram_addr;
0025 struct list_head hash_list;
0026 };
0027
0028 static const u32 cc_digest_len_init[] = {
0029 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
0030 static const u32 cc_md5_init[] = {
0031 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
0032 static const u32 cc_sha1_init[] = {
0033 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
0034 static const u32 cc_sha224_init[] = {
0035 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
0036 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
0037 static const u32 cc_sha256_init[] = {
0038 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
0039 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
0040 static const u32 cc_digest_len_sha512_init[] = {
0041 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
0042
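/*
 * Due to the way the HW works, every double word in the SHA-384 and SHA-512
 * larval hashes must be stored in hi/lo order
 */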
0047 #define hilo(x) upper_32_bits(x), lower_32_bits(x)
0048 static const u32 cc_sha384_init[] = {
0049 hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
0050 hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
0051 static const u32 cc_sha512_init[] = {
0052 hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
0053 hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
0054
0055 static const u32 cc_sm3_init[] = {
0056 SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
0057 SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
0058
0059 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
0060 unsigned int *seq_size);
0061
0062 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
0063 unsigned int *seq_size);
0064
0065 static const void *cc_larval_digest(struct device *dev, u32 mode);
0066
0067 struct cc_hash_alg {
0068 struct list_head entry;
0069 int hash_mode;
0070 int hw_mode;
0071 int inter_digestsize;
0072 struct cc_drvdata *drvdata;
0073 struct ahash_alg ahash_alg;
0074 };
0075
0076 struct hash_key_req_ctx {
0077 u32 keylen;
0078 dma_addr_t key_dma_addr;
0079 u8 *key;
0080 };
0081
0082
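/* hash per-session context */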
0083 struct cc_hash_ctx {
0084 struct cc_drvdata *drvdata;
0085
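	/* holds the origin digest: the digest after "setkey" if HMAC,
	 * the initial (larval) digest if plain HASH
	 */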
0088 u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
0089 u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
0090
0091 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
0092 dma_addr_t digest_buff_dma_addr;
0093
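	/* key material mapped for DMA during setkey */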
0094 struct hash_key_req_ctx key_params;
0095 int hash_mode;
0096 int hw_mode;
0097 int inter_digestsize;
0098 unsigned int hash_len;
0099 struct completion setkey_comp;
0100 bool is_hmac;
0101 };
0102
0103 static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
0104 unsigned int flow_mode, struct cc_hw_desc desc[],
0105 bool is_not_last_data, unsigned int *seq_size);
0106
0107 static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
0108 {
0109 if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
0110 mode == DRV_HASH_SHA512) {
0111 set_bytes_swap(desc, 1);
0112 } else {
0113 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
0114 }
0115 }
0116
0117 static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
0118 unsigned int digestsize)
0119 {
0120 state->digest_result_dma_addr =
0121 dma_map_single(dev, state->digest_result_buff,
0122 digestsize, DMA_BIDIRECTIONAL);
0123 if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
0124 dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
0125 digestsize);
0126 return -ENOMEM;
0127 }
0128 dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
0129 digestsize, state->digest_result_buff,
0130 &state->digest_result_dma_addr);
0131
0132 return 0;
0133 }
0134
0135 static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
0136 struct cc_hash_ctx *ctx)
0137 {
0138 bool is_hmac = ctx->is_hmac;
0139
0140 memset(state, 0, sizeof(*state));
0141
0142 if (is_hmac) {
0143 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
0144 ctx->hw_mode != DRV_CIPHER_CMAC) {
0145 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
0146 ctx->inter_digestsize,
0147 DMA_BIDIRECTIONAL);
0148
0149 memcpy(state->digest_buff, ctx->digest_buff,
0150 ctx->inter_digestsize);
0151 if (ctx->hash_mode == DRV_HASH_SHA512 ||
0152 ctx->hash_mode == DRV_HASH_SHA384)
0153 memcpy(state->digest_bytes_len,
0154 cc_digest_len_sha512_init,
0155 ctx->hash_len);
0156 else
0157 memcpy(state->digest_bytes_len,
0158 cc_digest_len_init,
0159 ctx->hash_len);
0160 }
0161
0162 if (ctx->hash_mode != DRV_HASH_NULL) {
0163 dma_sync_single_for_cpu(dev,
0164 ctx->opad_tmp_keys_dma_addr,
0165 ctx->inter_digestsize,
0166 DMA_BIDIRECTIONAL);
0167 memcpy(state->opad_digest_buff,
0168 ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
0169 }
0170 } else {
0171
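		/* Plain hash: start from the algorithm's initial (larval) digest */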
0172 const void *larval = cc_larval_digest(dev, ctx->hash_mode);
0173
0174 memcpy(state->digest_buff, larval, ctx->inter_digestsize);
0175 }
0176 }
0177
0178 static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
0179 struct cc_hash_ctx *ctx)
0180 {
0181 bool is_hmac = ctx->is_hmac;
0182
0183 state->digest_buff_dma_addr =
0184 dma_map_single(dev, state->digest_buff,
0185 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
0186 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
0187 dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
0188 ctx->inter_digestsize, state->digest_buff);
0189 return -EINVAL;
0190 }
0191 dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
0192 ctx->inter_digestsize, state->digest_buff,
0193 &state->digest_buff_dma_addr);
0194
0195 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
0196 state->digest_bytes_len_dma_addr =
0197 dma_map_single(dev, state->digest_bytes_len,
0198 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
0199 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
0200 dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
0201 HASH_MAX_LEN_SIZE, state->digest_bytes_len);
0202 goto unmap_digest_buf;
0203 }
0204 dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
0205 HASH_MAX_LEN_SIZE, state->digest_bytes_len,
0206 &state->digest_bytes_len_dma_addr);
0207 }
0208
0209 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
0210 state->opad_digest_dma_addr =
0211 dma_map_single(dev, state->opad_digest_buff,
0212 ctx->inter_digestsize,
0213 DMA_BIDIRECTIONAL);
0214 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
0215 dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
0216 ctx->inter_digestsize,
0217 state->opad_digest_buff);
0218 goto unmap_digest_len;
0219 }
0220 dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
0221 ctx->inter_digestsize, state->opad_digest_buff,
0222 &state->opad_digest_dma_addr);
0223 }
0224
0225 return 0;
0226
0227 unmap_digest_len:
0228 if (state->digest_bytes_len_dma_addr) {
0229 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
0230 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
0231 state->digest_bytes_len_dma_addr = 0;
0232 }
0233 unmap_digest_buf:
0234 if (state->digest_buff_dma_addr) {
0235 dma_unmap_single(dev, state->digest_buff_dma_addr,
0236 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
0237 state->digest_buff_dma_addr = 0;
0238 }
0239
0240 return -EINVAL;
0241 }
0242
0243 static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
0244 struct cc_hash_ctx *ctx)
0245 {
0246 if (state->digest_buff_dma_addr) {
0247 dma_unmap_single(dev, state->digest_buff_dma_addr,
0248 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
0249 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
0250 &state->digest_buff_dma_addr);
0251 state->digest_buff_dma_addr = 0;
0252 }
0253 if (state->digest_bytes_len_dma_addr) {
0254 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
0255 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
0256 dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
0257 &state->digest_bytes_len_dma_addr);
0258 state->digest_bytes_len_dma_addr = 0;
0259 }
0260 if (state->opad_digest_dma_addr) {
0261 dma_unmap_single(dev, state->opad_digest_dma_addr,
0262 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
0263 dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
0264 &state->opad_digest_dma_addr);
0265 state->opad_digest_dma_addr = 0;
0266 }
0267 }
0268
0269 static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
0270 unsigned int digestsize, u8 *result)
0271 {
0272 if (state->digest_result_dma_addr) {
0273 dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
0274 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
0278 memcpy(result, state->digest_result_buff, digestsize);
0279 }
0280 state->digest_result_dma_addr = 0;
0281 }
0282
0283 static void cc_update_complete(struct device *dev, void *cc_req, int err)
0284 {
0285 struct ahash_request *req = (struct ahash_request *)cc_req;
0286 struct ahash_req_ctx *state = ahash_request_ctx(req);
0287 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0288 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0289
0290 dev_dbg(dev, "req=%pK\n", req);
0291
0292 if (err != -EINPROGRESS) {
0293
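		/* Not a backlog notification */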
0294 cc_unmap_hash_request(dev, state, req->src, false);
0295 cc_unmap_req(dev, state, ctx);
0296 }
0297
0298 ahash_request_complete(req, err);
0299 }
0300
0301 static void cc_digest_complete(struct device *dev, void *cc_req, int err)
0302 {
0303 struct ahash_request *req = (struct ahash_request *)cc_req;
0304 struct ahash_req_ctx *state = ahash_request_ctx(req);
0305 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0306 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0307 u32 digestsize = crypto_ahash_digestsize(tfm);
0308
0309 dev_dbg(dev, "req=%pK\n", req);
0310
0311 if (err != -EINPROGRESS) {
0312
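		/* Not a backlog notification */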
0313 cc_unmap_hash_request(dev, state, req->src, false);
0314 cc_unmap_result(dev, state, digestsize, req->result);
0315 cc_unmap_req(dev, state, ctx);
0316 }
0317
0318 ahash_request_complete(req, err);
0319 }
0320
0321 static void cc_hash_complete(struct device *dev, void *cc_req, int err)
0322 {
0323 struct ahash_request *req = (struct ahash_request *)cc_req;
0324 struct ahash_req_ctx *state = ahash_request_ctx(req);
0325 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0326 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0327 u32 digestsize = crypto_ahash_digestsize(tfm);
0328
0329 dev_dbg(dev, "req=%pK\n", req);
0330
0331 if (err != -EINPROGRESS) {
0332
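		/* Not a backlog notification */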
0333 cc_unmap_hash_request(dev, state, req->src, false);
0334 cc_unmap_result(dev, state, digestsize, req->result);
0335 cc_unmap_req(dev, state, ctx);
0336 }
0337
0338 ahash_request_complete(req, err);
0339 }
0340
0341 static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
0342 int idx)
0343 {
0344 struct ahash_req_ctx *state = ahash_request_ctx(req);
0345 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0346 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0347 u32 digestsize = crypto_ahash_digestsize(tfm);
0348
0349
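	/* Get the final hash/MAC result */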
0350 hw_desc_init(&desc[idx]);
0351 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0352 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
0353 NS_BIT, 1);
0354 set_queue_last_ind(ctx->drvdata, &desc[idx]);
0355 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0356 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0357 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0358 cc_set_endianity(ctx->hash_mode, &desc[idx]);
0359 idx++;
0360
0361 return idx;
0362 }
0363
0364 static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
0365 int idx)
0366 {
0367 struct ahash_req_ctx *state = ahash_request_ctx(req);
0368 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0369 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0370 u32 digestsize = crypto_ahash_digestsize(tfm);
0371
0372
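	/* store the inner hash digest result in the request context */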
0373 hw_desc_init(&desc[idx]);
0374 set_cipher_mode(&desc[idx], ctx->hw_mode);
0375 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
0376 NS_BIT, 0);
0377 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0378 cc_set_endianity(ctx->hash_mode, &desc[idx]);
0379 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0380 idx++;
0381
0382
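	/* Load the opad key state into the hash */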
0383 hw_desc_init(&desc[idx]);
0384 set_cipher_mode(&desc[idx], ctx->hw_mode);
0385 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
0386 ctx->inter_digestsize, NS_BIT);
0387 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0388 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0389 idx++;
0390
0391
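	/* Load the hash current length */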
0392 hw_desc_init(&desc[idx]);
0393 set_cipher_mode(&desc[idx], ctx->hw_mode);
0394 set_din_sram(&desc[idx],
0395 cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
0396 ctx->hash_len);
0397 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
0398 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0399 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0400 idx++;
0401
0402
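	/* Memory barrier: wait for the previous AXI write to complete */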
0403 hw_desc_init(&desc[idx]);
0404 set_din_no_dma(&desc[idx], 0, 0xfffff0);
0405 set_dout_no_dma(&desc[idx], 0, 0, 1);
0406 idx++;
0407
0408
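	/* Perform the outer HASH update on the inner digest */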
0409 hw_desc_init(&desc[idx]);
0410 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
0411 digestsize, NS_BIT);
0412 set_flow_mode(&desc[idx], DIN_HASH);
0413 idx++;
0414
0415 return idx;
0416 }
0417
0418 static int cc_hash_digest(struct ahash_request *req)
0419 {
0420 struct ahash_req_ctx *state = ahash_request_ctx(req);
0421 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0422 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0423 u32 digestsize = crypto_ahash_digestsize(tfm);
0424 struct scatterlist *src = req->src;
0425 unsigned int nbytes = req->nbytes;
0426 u8 *result = req->result;
0427 struct device *dev = drvdata_to_dev(ctx->drvdata);
0428 bool is_hmac = ctx->is_hmac;
0429 struct cc_crypto_req cc_req = {};
0430 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
0431 u32 larval_digest_addr;
0432 int idx = 0;
0433 int rc = 0;
0434 gfp_t flags = cc_gfp_flags(&req->base);
0435
0436 dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
0437 nbytes);
0438
0439 cc_init_req(dev, state, ctx);
0440
0441 if (cc_map_req(dev, state, ctx)) {
0442 dev_err(dev, "map_ahash_source() failed\n");
0443 return -ENOMEM;
0444 }
0445
0446 if (cc_map_result(dev, state, digestsize)) {
0447 dev_err(dev, "map_ahash_digest() failed\n");
0448 cc_unmap_req(dev, state, ctx);
0449 return -ENOMEM;
0450 }
0451
0452 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
0453 flags)) {
0454 dev_err(dev, "map_ahash_request_final() failed\n");
0455 cc_unmap_result(dev, state, digestsize, result);
0456 cc_unmap_req(dev, state, ctx);
0457 return -ENOMEM;
0458 }
0459
0460
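	/* Setup request structure */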
0461 cc_req.user_cb = cc_digest_complete;
0462 cc_req.user_arg = req;
0463
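	/* If HMAC, load the ipad-xor-key digest; otherwise load the initial digest */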
0467 hw_desc_init(&desc[idx]);
0468 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0469 if (is_hmac) {
0470 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
0471 ctx->inter_digestsize, NS_BIT);
0472 } else {
0473 larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
0474 ctx->hash_mode);
0475 set_din_sram(&desc[idx], larval_digest_addr,
0476 ctx->inter_digestsize);
0477 }
0478 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0479 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0480 idx++;
0481
0482
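	/* Load the hash current length */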
0483 hw_desc_init(&desc[idx]);
0484 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0485
0486 if (is_hmac) {
0487 set_din_type(&desc[idx], DMA_DLLI,
0488 state->digest_bytes_len_dma_addr,
0489 ctx->hash_len, NS_BIT);
0490 } else {
0491 set_din_const(&desc[idx], 0, ctx->hash_len);
0492 if (nbytes)
0493 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
0494 else
0495 set_cipher_do(&desc[idx], DO_PAD);
0496 }
0497 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0498 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0499 idx++;
0500
0501 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
0502
0503 if (is_hmac) {
0504
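		/* HW last hash block padding (aka. "DO_PAD") */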
0505 hw_desc_init(&desc[idx]);
0506 set_cipher_mode(&desc[idx], ctx->hw_mode);
0507 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
0508 ctx->hash_len, NS_BIT, 0);
0509 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0510 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
0511 set_cipher_do(&desc[idx], DO_PAD);
0512 idx++;
0513
0514 idx = cc_fin_hmac(desc, req, idx);
0515 }
0516
0517 idx = cc_fin_result(desc, req, idx);
0518
0519 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
0520 if (rc != -EINPROGRESS && rc != -EBUSY) {
0521 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0522 cc_unmap_hash_request(dev, state, src, true);
0523 cc_unmap_result(dev, state, digestsize, result);
0524 cc_unmap_req(dev, state, ctx);
0525 }
0526 return rc;
0527 }
0528
0529 static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
0530 struct ahash_req_ctx *state, unsigned int idx)
0531 {
0532
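	/* Restore hash digest */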
0533 hw_desc_init(&desc[idx]);
0534 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0535 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
0536 ctx->inter_digestsize, NS_BIT);
0537 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0538 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0539 idx++;
0540
0541
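	/* Restore hash current length */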
0542 hw_desc_init(&desc[idx]);
0543 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0544 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0545 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
0546 ctx->hash_len, NS_BIT);
0547 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0548 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0549 idx++;
0550
0551 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
0552
0553 return idx;
0554 }
0555
0556 static int cc_hash_update(struct ahash_request *req)
0557 {
0558 struct ahash_req_ctx *state = ahash_request_ctx(req);
0559 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0560 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0561 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
0562 struct scatterlist *src = req->src;
0563 unsigned int nbytes = req->nbytes;
0564 struct device *dev = drvdata_to_dev(ctx->drvdata);
0565 struct cc_crypto_req cc_req = {};
0566 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
0567 u32 idx = 0;
0568 int rc;
0569 gfp_t flags = cc_gfp_flags(&req->base);
0570
0571 dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
0572 "hmac" : "hash", nbytes);
0573
0574 if (nbytes == 0) {
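		/* no real updates required */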
0575
0576 return 0;
0577 }
0578
0579 rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
0580 block_size, flags);
0581 if (rc) {
0582 if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				nbytes);
0585
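			/* No hardware processing is required for this update */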
0586 return 0;
0587 }
0588 dev_err(dev, "map_ahash_request_update() failed\n");
0589 return -ENOMEM;
0590 }
0591
0592 if (cc_map_req(dev, state, ctx)) {
0593 dev_err(dev, "map_ahash_source() failed\n");
0594 cc_unmap_hash_request(dev, state, src, true);
0595 return -EINVAL;
0596 }
0597
0598
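	/* Setup request structure */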
0599 cc_req.user_cb = cc_update_complete;
0600 cc_req.user_arg = req;
0601
0602 idx = cc_restore_hash(desc, ctx, state, idx);
0603
0604
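	/* store the hash digest result in the request context */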
0605 hw_desc_init(&desc[idx]);
0606 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0607 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
0608 ctx->inter_digestsize, NS_BIT, 0);
0609 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0610 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0611 idx++;
0612
0613
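	/* store the current hash byte count in the request context */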
0614 hw_desc_init(&desc[idx]);
0615 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0616 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
0617 ctx->hash_len, NS_BIT, 1);
0618 set_queue_last_ind(ctx->drvdata, &desc[idx]);
0619 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0620 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
0621 idx++;
0622
0623 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
0624 if (rc != -EINPROGRESS && rc != -EBUSY) {
0625 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0626 cc_unmap_hash_request(dev, state, src, true);
0627 cc_unmap_req(dev, state, ctx);
0628 }
0629 return rc;
0630 }
0631
0632 static int cc_do_finup(struct ahash_request *req, bool update)
0633 {
0634 struct ahash_req_ctx *state = ahash_request_ctx(req);
0635 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0636 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0637 u32 digestsize = crypto_ahash_digestsize(tfm);
0638 struct scatterlist *src = req->src;
0639 unsigned int nbytes = req->nbytes;
0640 u8 *result = req->result;
0641 struct device *dev = drvdata_to_dev(ctx->drvdata);
0642 bool is_hmac = ctx->is_hmac;
0643 struct cc_crypto_req cc_req = {};
0644 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
0645 unsigned int idx = 0;
0646 int rc;
0647 gfp_t flags = cc_gfp_flags(&req->base);
0648
0649 dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
0650 update ? "finup" : "final", nbytes);
0651
0652 if (cc_map_req(dev, state, ctx)) {
0653 dev_err(dev, "map_ahash_source() failed\n");
0654 return -EINVAL;
0655 }
0656
0657 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
0658 flags)) {
0659 dev_err(dev, "map_ahash_request_final() failed\n");
0660 cc_unmap_req(dev, state, ctx);
0661 return -ENOMEM;
0662 }
0663 if (cc_map_result(dev, state, digestsize)) {
0664 dev_err(dev, "map_ahash_digest() failed\n");
0665 cc_unmap_hash_request(dev, state, src, true);
0666 cc_unmap_req(dev, state, ctx);
0667 return -ENOMEM;
0668 }
0669
0670
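	/* Setup request structure */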
0671 cc_req.user_cb = cc_hash_complete;
0672 cc_req.user_arg = req;
0673
0674 idx = cc_restore_hash(desc, ctx, state, idx);
0675
0676
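	/* Pad the hash */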
0677 hw_desc_init(&desc[idx]);
0678 set_cipher_do(&desc[idx], DO_PAD);
0679 set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
0680 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
0681 ctx->hash_len, NS_BIT, 0);
0682 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
0683 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0684 idx++;
0685
0686 if (is_hmac)
0687 idx = cc_fin_hmac(desc, req, idx);
0688
0689 idx = cc_fin_result(desc, req, idx);
0690
0691 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
0692 if (rc != -EINPROGRESS && rc != -EBUSY) {
0693 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0694 cc_unmap_hash_request(dev, state, src, true);
0695 cc_unmap_result(dev, state, digestsize, result);
0696 cc_unmap_req(dev, state, ctx);
0697 }
0698 return rc;
0699 }
0700
0701 static int cc_hash_finup(struct ahash_request *req)
0702 {
0703 return cc_do_finup(req, true);
0704 }
0705
0706
0707 static int cc_hash_final(struct ahash_request *req)
0708 {
0709 return cc_do_finup(req, false);
0710 }
0711
0712 static int cc_hash_init(struct ahash_request *req)
0713 {
0714 struct ahash_req_ctx *state = ahash_request_ctx(req);
0715 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
0716 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
0717 struct device *dev = drvdata_to_dev(ctx->drvdata);
0718
0719 dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
0720
0721 cc_init_req(dev, state, ctx);
0722
0723 return 0;
0724 }
0725
0726 static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
0727 unsigned int keylen)
0728 {
0729 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
0730 struct cc_crypto_req cc_req = {};
0731 struct cc_hash_ctx *ctx = NULL;
0732 int blocksize = 0;
0733 int digestsize = 0;
0734 int i, idx = 0, rc = 0;
0735 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
0736 u32 larval_addr;
0737 struct device *dev;
0738
0739 ctx = crypto_ahash_ctx(ahash);
0740 dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d\n", keylen);
0742
0743 blocksize = crypto_tfm_alg_blocksize(&ahash->base);
0744 digestsize = crypto_ahash_digestsize(ahash);
0745
0746 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
0747
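	/* A keylen of ZERO bytes selects the plain HASH flow;
	 * any NON-ZERO value selects the HMAC flow
	 */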
0751 ctx->key_params.keylen = keylen;
0752 ctx->key_params.key_dma_addr = 0;
0753 ctx->is_hmac = true;
0754 ctx->key_params.key = NULL;
0755
0756 if (keylen) {
0757 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
0758 if (!ctx->key_params.key)
0759 return -ENOMEM;
0760
0761 ctx->key_params.key_dma_addr =
0762 dma_map_single(dev, ctx->key_params.key, keylen,
0763 DMA_TO_DEVICE);
0764 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
0765 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
0766 ctx->key_params.key, keylen);
0767 kfree_sensitive(ctx->key_params.key);
0768 return -ENOMEM;
0769 }
0770 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
0771 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
0772
0773 if (keylen > blocksize) {
0774
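			/* Load the hash initial state */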
0775 hw_desc_init(&desc[idx]);
0776 set_cipher_mode(&desc[idx], ctx->hw_mode);
0777 set_din_sram(&desc[idx], larval_addr,
0778 ctx->inter_digestsize);
0779 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0780 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0781 idx++;
0782
0783
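			/* Load the hash current length */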
0784 hw_desc_init(&desc[idx]);
0785 set_cipher_mode(&desc[idx], ctx->hw_mode);
0786 set_din_const(&desc[idx], 0, ctx->hash_len);
0787 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
0788 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0789 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0790 idx++;
0791
0792 hw_desc_init(&desc[idx]);
0793 set_din_type(&desc[idx], DMA_DLLI,
0794 ctx->key_params.key_dma_addr, keylen,
0795 NS_BIT);
0796 set_flow_mode(&desc[idx], DIN_HASH);
0797 idx++;
0798
0799
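			/* Get the hashed key */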
0800 hw_desc_init(&desc[idx]);
0801 set_cipher_mode(&desc[idx], ctx->hw_mode);
0802 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
0803 digestsize, NS_BIT, 0);
0804 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0805 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0806 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0807 cc_set_endianity(ctx->hash_mode, &desc[idx]);
0808 idx++;
0809
0810 hw_desc_init(&desc[idx]);
0811 set_din_const(&desc[idx], 0, (blocksize - digestsize));
0812 set_flow_mode(&desc[idx], BYPASS);
0813 set_dout_dlli(&desc[idx],
0814 (ctx->opad_tmp_keys_dma_addr +
0815 digestsize),
0816 (blocksize - digestsize), NS_BIT, 0);
0817 idx++;
0818 } else {
0819 hw_desc_init(&desc[idx]);
0820 set_din_type(&desc[idx], DMA_DLLI,
0821 ctx->key_params.key_dma_addr, keylen,
0822 NS_BIT);
0823 set_flow_mode(&desc[idx], BYPASS);
0824 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
0825 keylen, NS_BIT, 0);
0826 idx++;
0827
0828 if ((blocksize - keylen)) {
0829 hw_desc_init(&desc[idx]);
0830 set_din_const(&desc[idx], 0,
0831 (blocksize - keylen));
0832 set_flow_mode(&desc[idx], BYPASS);
0833 set_dout_dlli(&desc[idx],
0834 (ctx->opad_tmp_keys_dma_addr +
0835 keylen), (blocksize - keylen),
0836 NS_BIT, 0);
0837 idx++;
0838 }
0839 }
0840 } else {
0841 hw_desc_init(&desc[idx]);
0842 set_din_const(&desc[idx], 0, blocksize);
0843 set_flow_mode(&desc[idx], BYPASS);
0844 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
0845 blocksize, NS_BIT, 0);
0846 idx++;
0847 }
0848
0849 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
0850 if (rc) {
0851 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0852 goto out;
0853 }
0854
0855
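	/* Calculate the derived HMAC ipad/opad keys */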
0856 for (idx = 0, i = 0; i < 2; i++) {
0857
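		/* Load the hash initial state */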
0858 hw_desc_init(&desc[idx]);
0859 set_cipher_mode(&desc[idx], ctx->hw_mode);
0860 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
0861 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0862 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0863 idx++;
0864
0865
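		/* Load the hash current length */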
0866 hw_desc_init(&desc[idx]);
0867 set_cipher_mode(&desc[idx], ctx->hw_mode);
0868 set_din_const(&desc[idx], 0, ctx->hash_len);
0869 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0870 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0871 idx++;
0872
0873
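		/* Prepare the ipad/opad XOR constant */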
0874 hw_desc_init(&desc[idx]);
0875 set_xor_val(&desc[idx], hmac_pad_const[i]);
0876 set_cipher_mode(&desc[idx], ctx->hw_mode);
0877 set_flow_mode(&desc[idx], S_DIN_to_HASH);
0878 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
0879 idx++;
0880
0881
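		/* Perform HASH update on the padded key block */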
0882 hw_desc_init(&desc[idx]);
0883 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
0884 blocksize, NS_BIT);
0885 set_cipher_mode(&desc[idx], ctx->hw_mode);
0886 set_xor_active(&desc[idx]);
0887 set_flow_mode(&desc[idx], DIN_HASH);
0888 idx++;
0889
0890
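		/* Get the IPAD/OPAD xor key (note: IPAD is the initial digest
		 * of the first HASH "update" state)
		 */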
0893 hw_desc_init(&desc[idx]);
0894 set_cipher_mode(&desc[idx], ctx->hw_mode);
0895 if (i > 0)
0896 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
0897 ctx->inter_digestsize, NS_BIT, 0);
0898 else
0899 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
0900 ctx->inter_digestsize, NS_BIT, 0);
0901 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0902 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0903 idx++;
0904 }
0905
0906 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
0907
0908 out:
0909 if (ctx->key_params.key_dma_addr) {
0910 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
0911 ctx->key_params.keylen, DMA_TO_DEVICE);
0912 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
0913 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
0914 }
0915
0916 kfree_sensitive(ctx->key_params.key);
0917
0918 return rc;
0919 }
0920
0921 static int cc_xcbc_setkey(struct crypto_ahash *ahash,
0922 const u8 *key, unsigned int keylen)
0923 {
0924 struct cc_crypto_req cc_req = {};
0925 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
0926 struct device *dev = drvdata_to_dev(ctx->drvdata);
0927 int rc = 0;
0928 unsigned int idx = 0;
0929 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
0930
0931 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
0932
0933 switch (keylen) {
0934 case AES_KEYSIZE_128:
0935 case AES_KEYSIZE_192:
0936 case AES_KEYSIZE_256:
0937 break;
0938 default:
0939 return -EINVAL;
0940 }
0941
0942 ctx->key_params.keylen = keylen;
0943
0944 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
0945 if (!ctx->key_params.key)
0946 return -ENOMEM;
0947
0948 ctx->key_params.key_dma_addr =
0949 dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
0950 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
0951 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
0952 key, keylen);
0953 kfree_sensitive(ctx->key_params.key);
0954 return -ENOMEM;
0955 }
0956 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
0957 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
0958
0959 ctx->is_hmac = true;
0960
0961 hw_desc_init(&desc[idx]);
0962 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
0963 keylen, NS_BIT);
0964 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
0965 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
0966 set_key_size_aes(&desc[idx], keylen);
0967 set_flow_mode(&desc[idx], S_DIN_to_AES);
0968 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0969 idx++;
0970
0971 hw_desc_init(&desc[idx]);
0972 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
0973 set_flow_mode(&desc[idx], DIN_AES_DOUT);
0974 set_dout_dlli(&desc[idx],
0975 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
0976 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
0977 idx++;
0978
0979 hw_desc_init(&desc[idx]);
0980 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
0981 set_flow_mode(&desc[idx], DIN_AES_DOUT);
0982 set_dout_dlli(&desc[idx],
0983 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
0984 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
0985 idx++;
0986
0987 hw_desc_init(&desc[idx]);
0988 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
0989 set_flow_mode(&desc[idx], DIN_AES_DOUT);
0990 set_dout_dlli(&desc[idx],
0991 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
0992 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
0993 idx++;
0994
0995 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
0996
0997 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
0998 ctx->key_params.keylen, DMA_TO_DEVICE);
0999 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1000 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1001
1002 kfree_sensitive(ctx->key_params.key);
1003
1004 return rc;
1005 }
1006
1007 static int cc_cmac_setkey(struct crypto_ahash *ahash,
1008 const u8 *key, unsigned int keylen)
1009 {
1010 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1011 struct device *dev = drvdata_to_dev(ctx->drvdata);
1012
1013 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1014
1015 ctx->is_hmac = true;
1016
1017 switch (keylen) {
1018 case AES_KEYSIZE_128:
1019 case AES_KEYSIZE_192:
1020 case AES_KEYSIZE_256:
1021 break;
1022 default:
1023 return -EINVAL;
1024 }
1025
1026 ctx->key_params.keylen = keylen;
1027
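	/* Copy the key into the context's DMA-mapped key buffer */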
1030 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1031 keylen, DMA_TO_DEVICE);
1032
1033 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1034 if (keylen == 24) {
1035 memset(ctx->opad_tmp_keys_buff + 24, 0,
1036 CC_AES_KEY_SIZE_MAX - 24);
1037 }
1038
1039 dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1040 keylen, DMA_TO_DEVICE);
1041
1042 ctx->key_params.keylen = keylen;
1043
1044 return 0;
1045 }
1046
1047 static void cc_free_ctx(struct cc_hash_ctx *ctx)
1048 {
1049 struct device *dev = drvdata_to_dev(ctx->drvdata);
1050
1051 if (ctx->digest_buff_dma_addr) {
1052 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1053 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1054 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1055 &ctx->digest_buff_dma_addr);
1056 ctx->digest_buff_dma_addr = 0;
1057 }
1058 if (ctx->opad_tmp_keys_dma_addr) {
1059 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1060 sizeof(ctx->opad_tmp_keys_buff),
1061 DMA_BIDIRECTIONAL);
1062 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1063 &ctx->opad_tmp_keys_dma_addr);
1064 ctx->opad_tmp_keys_dma_addr = 0;
1065 }
1066
1067 ctx->key_params.keylen = 0;
1068 }
1069
1070 static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1071 {
1072 struct device *dev = drvdata_to_dev(ctx->drvdata);
1073
1074 ctx->key_params.keylen = 0;
1075
1076 ctx->digest_buff_dma_addr =
1077 dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
1078 DMA_BIDIRECTIONAL);
1079 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1080 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1081 sizeof(ctx->digest_buff), ctx->digest_buff);
1082 goto fail;
1083 }
1084 dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1085 sizeof(ctx->digest_buff), ctx->digest_buff,
1086 &ctx->digest_buff_dma_addr);
1087
1088 ctx->opad_tmp_keys_dma_addr =
1089 dma_map_single(dev, ctx->opad_tmp_keys_buff,
1090 sizeof(ctx->opad_tmp_keys_buff),
1091 DMA_BIDIRECTIONAL);
1092 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1093 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1094 sizeof(ctx->opad_tmp_keys_buff),
1095 ctx->opad_tmp_keys_buff);
1096 goto fail;
1097 }
1098 dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1099 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1100 &ctx->opad_tmp_keys_dma_addr);
1101
1102 ctx->is_hmac = false;
1103 return 0;
1104
1105 fail:
1106 cc_free_ctx(ctx);
1107 return -ENOMEM;
1108 }
1109
1110 static int cc_get_hash_len(struct crypto_tfm *tfm)
1111 {
1112 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1113
1114 if (ctx->hash_mode == DRV_HASH_SM3)
1115 return CC_SM3_HASH_LEN_SIZE;
1116 else
1117 return cc_get_default_hash_len(ctx->drvdata);
1118 }
1119
1120 static int cc_cra_init(struct crypto_tfm *tfm)
1121 {
1122 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1123 struct hash_alg_common *hash_alg_common =
1124 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1125 struct ahash_alg *ahash_alg =
1126 container_of(hash_alg_common, struct ahash_alg, halg);
1127 struct cc_hash_alg *cc_alg =
1128 container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1129
1130 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1131 sizeof(struct ahash_req_ctx));
1132
1133 ctx->hash_mode = cc_alg->hash_mode;
1134 ctx->hw_mode = cc_alg->hw_mode;
1135 ctx->inter_digestsize = cc_alg->inter_digestsize;
1136 ctx->drvdata = cc_alg->drvdata;
1137 ctx->hash_len = cc_get_hash_len(tfm);
1138 return cc_alloc_ctx(ctx);
1139 }
1140
1141 static void cc_cra_exit(struct crypto_tfm *tfm)
1142 {
1143 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1144 struct device *dev = drvdata_to_dev(ctx->drvdata);
1145
	dev_dbg(dev, "cc_cra_exit\n");
1147 cc_free_ctx(ctx);
1148 }
1149
1150 static int cc_mac_update(struct ahash_request *req)
1151 {
1152 struct ahash_req_ctx *state = ahash_request_ctx(req);
1153 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1154 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1155 struct device *dev = drvdata_to_dev(ctx->drvdata);
1156 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1157 struct cc_crypto_req cc_req = {};
1158 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1159 int rc;
1160 u32 idx = 0;
1161 gfp_t flags = cc_gfp_flags(&req->base);
1162
1163 if (req->nbytes == 0) {
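		/* no real updates required */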
1164
1165 return 0;
1166 }
1167
1168 state->xcbc_count++;
1169
1170 rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1171 req->nbytes, block_size, flags);
1172 if (rc) {
1173 if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				req->nbytes);
1176
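			/* No hardware processing is required for this update */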
1177 return 0;
1178 }
1179 dev_err(dev, "map_ahash_request_update() failed\n");
1180 return -ENOMEM;
1181 }
1182
1183 if (cc_map_req(dev, state, ctx)) {
1184 dev_err(dev, "map_ahash_source() failed\n");
1185 return -EINVAL;
1186 }
1187
1188 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1189 cc_setup_xcbc(req, desc, &idx);
1190 else
1191 cc_setup_cmac(req, desc, &idx);
1192
1193 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1194
1195
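	/* store the intermediate MAC digest in the request context */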
1196 hw_desc_init(&desc[idx]);
1197 set_cipher_mode(&desc[idx], ctx->hw_mode);
1198 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1199 ctx->inter_digestsize, NS_BIT, 1);
1200 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1201 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1202 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1203 idx++;
1204
1205
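	/* Setup request structure */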
1206 cc_req.user_cb = cc_update_complete;
1207 cc_req.user_arg = req;
1208
1209 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1210 if (rc != -EINPROGRESS && rc != -EBUSY) {
1211 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1212 cc_unmap_hash_request(dev, state, req->src, true);
1213 cc_unmap_req(dev, state, ctx);
1214 }
1215 return rc;
1216 }
1217
1218 static int cc_mac_final(struct ahash_request *req)
1219 {
1220 struct ahash_req_ctx *state = ahash_request_ctx(req);
1221 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1222 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1223 struct device *dev = drvdata_to_dev(ctx->drvdata);
1224 struct cc_crypto_req cc_req = {};
1225 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1226 int idx = 0;
1227 int rc = 0;
1228 u32 key_size, key_len;
1229 u32 digestsize = crypto_ahash_digestsize(tfm);
1230 gfp_t flags = cc_gfp_flags(&req->base);
1231 u32 rem_cnt = *cc_hash_buf_cnt(state);
1232
1233 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1234 key_size = CC_AES_128_BIT_KEY_SIZE;
1235 key_len = CC_AES_128_BIT_KEY_SIZE;
1236 } else {
1237 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1238 ctx->key_params.keylen;
1239 key_len = ctx->key_params.keylen;
1240 }
1241
	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1243
1244 if (cc_map_req(dev, state, ctx)) {
1245 dev_err(dev, "map_ahash_source() failed\n");
1246 return -EINVAL;
1247 }
1248
1249 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1250 req->nbytes, 0, flags)) {
1251 dev_err(dev, "map_ahash_request_final() failed\n");
1252 cc_unmap_req(dev, state, ctx);
1253 return -ENOMEM;
1254 }
1255
1256 if (cc_map_result(dev, state, digestsize)) {
1257 dev_err(dev, "map_ahash_digest() failed\n");
1258 cc_unmap_hash_request(dev, state, req->src, true);
1259 cc_unmap_req(dev, state, ctx);
1260 return -ENOMEM;
1261 }
1262
1263
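	/* Setup request structure */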
1264 cc_req.user_cb = cc_hash_complete;
1265 cc_req.user_arg = req;
1266
1267 if (state->xcbc_count && rem_cnt == 0) {
1268
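		/* Load the key for ECB decryption */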
1269 hw_desc_init(&desc[idx]);
1270 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1271 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1272 set_din_type(&desc[idx], DMA_DLLI,
1273 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1274 key_size, NS_BIT);
1275 set_key_size_aes(&desc[idx], key_len);
1276 set_flow_mode(&desc[idx], S_DIN_to_AES);
1277 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1278 idx++;
1279
1280
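		/* Initiate decryption of the block state to the previous
		 * block_state-XOR-M[n]
		 */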
1283 hw_desc_init(&desc[idx]);
1284 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1285 CC_AES_BLOCK_SIZE, NS_BIT);
1286 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1287 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1288 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1289 idx++;
1290
1291
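		/* Memory barrier: wait for the AXI write to complete */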
1292 hw_desc_init(&desc[idx]);
1293 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1294 set_dout_no_dma(&desc[idx], 0, 0, 1);
1295 idx++;
1296 }
1297
1298 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1299 cc_setup_xcbc(req, desc, &idx);
1300 else
1301 cc_setup_cmac(req, desc, &idx);
1302
1303 if (state->xcbc_count == 0) {
1304 hw_desc_init(&desc[idx]);
1305 set_cipher_mode(&desc[idx], ctx->hw_mode);
1306 set_key_size_aes(&desc[idx], key_len);
1307 set_cmac_size0_mode(&desc[idx]);
1308 set_flow_mode(&desc[idx], S_DIN_to_AES);
1309 idx++;
1310 } else if (rem_cnt > 0) {
1311 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1312 } else {
1313 hw_desc_init(&desc[idx]);
1314 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1315 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1316 idx++;
1317 }
1318
1319
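	/* Get final MAC result */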
1320 hw_desc_init(&desc[idx]);
1321 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1322 digestsize, NS_BIT, 1);
1323 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1324 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1325 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1326 set_cipher_mode(&desc[idx], ctx->hw_mode);
1327 idx++;
1328
1329 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1330 if (rc != -EINPROGRESS && rc != -EBUSY) {
1331 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1332 cc_unmap_hash_request(dev, state, req->src, true);
1333 cc_unmap_result(dev, state, digestsize, req->result);
1334 cc_unmap_req(dev, state, ctx);
1335 }
1336 return rc;
1337 }
1338
1339 static int cc_mac_finup(struct ahash_request *req)
1340 {
1341 struct ahash_req_ctx *state = ahash_request_ctx(req);
1342 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1343 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1344 struct device *dev = drvdata_to_dev(ctx->drvdata);
1345 struct cc_crypto_req cc_req = {};
1346 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1347 int idx = 0;
1348 int rc = 0;
1349 u32 key_len = 0;
1350 u32 digestsize = crypto_ahash_digestsize(tfm);
1351 gfp_t flags = cc_gfp_flags(&req->base);
1352
1353 dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1354 if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Calling cc_mac_final\n");
1356 return cc_mac_final(req);
1357 }
1358
1359 if (cc_map_req(dev, state, ctx)) {
1360 dev_err(dev, "map_ahash_source() failed\n");
1361 return -EINVAL;
1362 }
1363
1364 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1365 req->nbytes, 1, flags)) {
1366 dev_err(dev, "map_ahash_request_final() failed\n");
1367 cc_unmap_req(dev, state, ctx);
1368 return -ENOMEM;
1369 }
1370 if (cc_map_result(dev, state, digestsize)) {
1371 dev_err(dev, "map_ahash_digest() failed\n");
1372 cc_unmap_hash_request(dev, state, req->src, true);
1373 cc_unmap_req(dev, state, ctx);
1374 return -ENOMEM;
1375 }
1376
1377
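	/* Setup request structure */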
1378 cc_req.user_cb = cc_hash_complete;
1379 cc_req.user_arg = req;
1380
1381 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1382 key_len = CC_AES_128_BIT_KEY_SIZE;
1383 cc_setup_xcbc(req, desc, &idx);
1384 } else {
1385 key_len = ctx->key_params.keylen;
1386 cc_setup_cmac(req, desc, &idx);
1387 }
1388
1389 if (req->nbytes == 0) {
1390 hw_desc_init(&desc[idx]);
1391 set_cipher_mode(&desc[idx], ctx->hw_mode);
1392 set_key_size_aes(&desc[idx], key_len);
1393 set_cmac_size0_mode(&desc[idx]);
1394 set_flow_mode(&desc[idx], S_DIN_to_AES);
1395 idx++;
1396 } else {
1397 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1398 }
1399
1400
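	/* Get final MAC result */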
1401 hw_desc_init(&desc[idx]);
1402 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1403 digestsize, NS_BIT, 1);
1404 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1405 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1406 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1407 set_cipher_mode(&desc[idx], ctx->hw_mode);
1408 idx++;
1409
1410 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1411 if (rc != -EINPROGRESS && rc != -EBUSY) {
1412 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1413 cc_unmap_hash_request(dev, state, req->src, true);
1414 cc_unmap_result(dev, state, digestsize, req->result);
1415 cc_unmap_req(dev, state, ctx);
1416 }
1417 return rc;
1418 }
1419
1420 static int cc_mac_digest(struct ahash_request *req)
1421 {
1422 struct ahash_req_ctx *state = ahash_request_ctx(req);
1423 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1424 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1425 struct device *dev = drvdata_to_dev(ctx->drvdata);
1426 u32 digestsize = crypto_ahash_digestsize(tfm);
1427 struct cc_crypto_req cc_req = {};
1428 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1429 u32 key_len;
1430 unsigned int idx = 0;
1431 int rc;
1432 gfp_t flags = cc_gfp_flags(&req->base);
1433
	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);
1435
1436 cc_init_req(dev, state, ctx);
1437
1438 if (cc_map_req(dev, state, ctx)) {
1439 dev_err(dev, "map_ahash_source() failed\n");
1440 return -ENOMEM;
1441 }
1442 if (cc_map_result(dev, state, digestsize)) {
1443 dev_err(dev, "map_ahash_digest() failed\n");
1444 cc_unmap_req(dev, state, ctx);
1445 return -ENOMEM;
1446 }
1447
1448 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1449 req->nbytes, 1, flags)) {
1450 dev_err(dev, "map_ahash_request_final() failed\n");
1451 cc_unmap_req(dev, state, ctx);
1452 return -ENOMEM;
1453 }
1454
1455
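	/* Setup request structure */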
1456 cc_req.user_cb = cc_digest_complete;
1457 cc_req.user_arg = req;
1458
1459 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1460 key_len = CC_AES_128_BIT_KEY_SIZE;
1461 cc_setup_xcbc(req, desc, &idx);
1462 } else {
1463 key_len = ctx->key_params.keylen;
1464 cc_setup_cmac(req, desc, &idx);
1465 }
1466
1467 if (req->nbytes == 0) {
1468 hw_desc_init(&desc[idx]);
1469 set_cipher_mode(&desc[idx], ctx->hw_mode);
1470 set_key_size_aes(&desc[idx], key_len);
1471 set_cmac_size0_mode(&desc[idx]);
1472 set_flow_mode(&desc[idx], S_DIN_to_AES);
1473 idx++;
1474 } else {
1475 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1476 }
1477
1478
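	/* Get final MAC result */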
1479 hw_desc_init(&desc[idx]);
1480 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1481 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1482 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1483 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1484 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1485 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1486 set_cipher_mode(&desc[idx], ctx->hw_mode);
1487 idx++;
1488
1489 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1490 if (rc != -EINPROGRESS && rc != -EBUSY) {
1491 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1492 cc_unmap_hash_request(dev, state, req->src, true);
1493 cc_unmap_result(dev, state, digestsize, req->result);
1494 cc_unmap_req(dev, state, ctx);
1495 }
1496 return rc;
1497 }
1498
1499 static int cc_hash_export(struct ahash_request *req, void *out)
1500 {
1501 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1502 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1503 struct ahash_req_ctx *state = ahash_request_ctx(req);
1504 u8 *curr_buff = cc_hash_buf(state);
1505 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1506 const u32 tmp = CC_EXPORT_MAGIC;
1507
1508 memcpy(out, &tmp, sizeof(u32));
1509 out += sizeof(u32);
1510
1511 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1512 out += ctx->inter_digestsize;
1513
1514 memcpy(out, state->digest_bytes_len, ctx->hash_len);
1515 out += ctx->hash_len;
1516
1517 memcpy(out, &curr_buff_cnt, sizeof(u32));
1518 out += sizeof(u32);
1519
1520 memcpy(out, curr_buff, curr_buff_cnt);
1521
1522 return 0;
1523 }
1524
1525 static int cc_hash_import(struct ahash_request *req, const void *in)
1526 {
1527 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1528 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1529 struct device *dev = drvdata_to_dev(ctx->drvdata);
1530 struct ahash_req_ctx *state = ahash_request_ctx(req);
1531 u32 tmp;
1532
1533 memcpy(&tmp, in, sizeof(u32));
1534 if (tmp != CC_EXPORT_MAGIC)
1535 return -EINVAL;
1536 in += sizeof(u32);
1537
1538 cc_init_req(dev, state, ctx);
1539
1540 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1541 in += ctx->inter_digestsize;
1542
1543 memcpy(state->digest_bytes_len, in, ctx->hash_len);
1544 in += ctx->hash_len;
1545
1546
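	/* Validate the buffer count before restoring the partial block */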
1547 memcpy(&tmp, in, sizeof(u32));
1548 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1549 return -EINVAL;
1550 in += sizeof(u32);
1551
1552 state->buf_cnt[0] = tmp;
1553 memcpy(state->buffers[0], in, tmp);
1554
1555 return 0;
1556 }
1557
1558 struct cc_hash_template {
1559 char name[CRYPTO_MAX_ALG_NAME];
1560 char driver_name[CRYPTO_MAX_ALG_NAME];
1561 char mac_name[CRYPTO_MAX_ALG_NAME];
1562 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1563 unsigned int blocksize;
1564 bool is_mac;
1565 bool synchronize;
1566 struct ahash_alg template_ahash;
1567 int hash_mode;
1568 int hw_mode;
1569 int inter_digestsize;
1570 struct cc_drvdata *drvdata;
1571 u32 min_hw_rev;
1572 enum cc_std_body std_body;
1573 };
1574
1575 #define CC_STATE_SIZE(_x) \
1576 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1577
1578
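/* hash descriptor templates */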
1579 static struct cc_hash_template driver_hash[] = {
1580
1581 {
1582 .name = "sha1",
1583 .driver_name = "sha1-ccree",
1584 .mac_name = "hmac(sha1)",
1585 .mac_driver_name = "hmac-sha1-ccree",
1586 .blocksize = SHA1_BLOCK_SIZE,
1587 .is_mac = true,
1588 .synchronize = false,
1589 .template_ahash = {
1590 .init = cc_hash_init,
1591 .update = cc_hash_update,
1592 .final = cc_hash_final,
1593 .finup = cc_hash_finup,
1594 .digest = cc_hash_digest,
1595 .export = cc_hash_export,
1596 .import = cc_hash_import,
1597 .setkey = cc_hash_setkey,
1598 .halg = {
1599 .digestsize = SHA1_DIGEST_SIZE,
1600 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601 },
1602 },
1603 .hash_mode = DRV_HASH_SHA1,
1604 .hw_mode = DRV_HASH_HW_SHA1,
1605 .inter_digestsize = SHA1_DIGEST_SIZE,
1606 .min_hw_rev = CC_HW_REV_630,
1607 .std_body = CC_STD_NIST,
1608 },
1609 {
1610 .name = "sha256",
1611 .driver_name = "sha256-ccree",
1612 .mac_name = "hmac(sha256)",
1613 .mac_driver_name = "hmac-sha256-ccree",
1614 .blocksize = SHA256_BLOCK_SIZE,
1615 .is_mac = true,
1616 .template_ahash = {
1617 .init = cc_hash_init,
1618 .update = cc_hash_update,
1619 .final = cc_hash_final,
1620 .finup = cc_hash_finup,
1621 .digest = cc_hash_digest,
1622 .export = cc_hash_export,
1623 .import = cc_hash_import,
1624 .setkey = cc_hash_setkey,
1625 .halg = {
1626 .digestsize = SHA256_DIGEST_SIZE,
1627 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1628 },
1629 },
1630 .hash_mode = DRV_HASH_SHA256,
1631 .hw_mode = DRV_HASH_HW_SHA256,
1632 .inter_digestsize = SHA256_DIGEST_SIZE,
1633 .min_hw_rev = CC_HW_REV_630,
1634 .std_body = CC_STD_NIST,
1635 },
1636 {
1637 .name = "sha224",
1638 .driver_name = "sha224-ccree",
1639 .mac_name = "hmac(sha224)",
1640 .mac_driver_name = "hmac-sha224-ccree",
1641 .blocksize = SHA224_BLOCK_SIZE,
1642 .is_mac = true,
1643 .template_ahash = {
1644 .init = cc_hash_init,
1645 .update = cc_hash_update,
1646 .final = cc_hash_final,
1647 .finup = cc_hash_finup,
1648 .digest = cc_hash_digest,
1649 .export = cc_hash_export,
1650 .import = cc_hash_import,
1651 .setkey = cc_hash_setkey,
1652 .halg = {
1653 .digestsize = SHA224_DIGEST_SIZE,
1654 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1655 },
1656 },
1657 .hash_mode = DRV_HASH_SHA224,
1658 .hw_mode = DRV_HASH_HW_SHA256,
1659 .inter_digestsize = SHA256_DIGEST_SIZE,
1660 .min_hw_rev = CC_HW_REV_630,
1661 .std_body = CC_STD_NIST,
1662 },
1663 {
1664 .name = "sha384",
1665 .driver_name = "sha384-ccree",
1666 .mac_name = "hmac(sha384)",
1667 .mac_driver_name = "hmac-sha384-ccree",
1668 .blocksize = SHA384_BLOCK_SIZE,
1669 .is_mac = true,
1670 .template_ahash = {
1671 .init = cc_hash_init,
1672 .update = cc_hash_update,
1673 .final = cc_hash_final,
1674 .finup = cc_hash_finup,
1675 .digest = cc_hash_digest,
1676 .export = cc_hash_export,
1677 .import = cc_hash_import,
1678 .setkey = cc_hash_setkey,
1679 .halg = {
1680 .digestsize = SHA384_DIGEST_SIZE,
1681 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1682 },
1683 },
1684 .hash_mode = DRV_HASH_SHA384,
1685 .hw_mode = DRV_HASH_HW_SHA512,
1686 .inter_digestsize = SHA512_DIGEST_SIZE,
1687 .min_hw_rev = CC_HW_REV_712,
1688 .std_body = CC_STD_NIST,
1689 },
1690 {
1691 .name = "sha512",
1692 .driver_name = "sha512-ccree",
1693 .mac_name = "hmac(sha512)",
1694 .mac_driver_name = "hmac-sha512-ccree",
1695 .blocksize = SHA512_BLOCK_SIZE,
1696 .is_mac = true,
1697 .template_ahash = {
1698 .init = cc_hash_init,
1699 .update = cc_hash_update,
1700 .final = cc_hash_final,
1701 .finup = cc_hash_finup,
1702 .digest = cc_hash_digest,
1703 .export = cc_hash_export,
1704 .import = cc_hash_import,
1705 .setkey = cc_hash_setkey,
1706 .halg = {
1707 .digestsize = SHA512_DIGEST_SIZE,
1708 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1709 },
1710 },
1711 .hash_mode = DRV_HASH_SHA512,
1712 .hw_mode = DRV_HASH_HW_SHA512,
1713 .inter_digestsize = SHA512_DIGEST_SIZE,
1714 .min_hw_rev = CC_HW_REV_712,
1715 .std_body = CC_STD_NIST,
1716 },
1717 {
1718 .name = "md5",
1719 .driver_name = "md5-ccree",
1720 .mac_name = "hmac(md5)",
1721 .mac_driver_name = "hmac-md5-ccree",
1722 .blocksize = MD5_HMAC_BLOCK_SIZE,
1723 .is_mac = true,
1724 .template_ahash = {
1725 .init = cc_hash_init,
1726 .update = cc_hash_update,
1727 .final = cc_hash_final,
1728 .finup = cc_hash_finup,
1729 .digest = cc_hash_digest,
1730 .export = cc_hash_export,
1731 .import = cc_hash_import,
1732 .setkey = cc_hash_setkey,
1733 .halg = {
1734 .digestsize = MD5_DIGEST_SIZE,
1735 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1736 },
1737 },
1738 .hash_mode = DRV_HASH_MD5,
1739 .hw_mode = DRV_HASH_HW_MD5,
1740 .inter_digestsize = MD5_DIGEST_SIZE,
1741 .min_hw_rev = CC_HW_REV_630,
1742 .std_body = CC_STD_NIST,
1743 },
1744 {
1745 .name = "sm3",
1746 .driver_name = "sm3-ccree",
1747 .blocksize = SM3_BLOCK_SIZE,
1748 .is_mac = false,
1749 .template_ahash = {
1750 .init = cc_hash_init,
1751 .update = cc_hash_update,
1752 .final = cc_hash_final,
1753 .finup = cc_hash_finup,
1754 .digest = cc_hash_digest,
1755 .export = cc_hash_export,
1756 .import = cc_hash_import,
1757 .setkey = cc_hash_setkey,
1758 .halg = {
1759 .digestsize = SM3_DIGEST_SIZE,
1760 .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1761 },
1762 },
1763 .hash_mode = DRV_HASH_SM3,
1764 .hw_mode = DRV_HASH_HW_SM3,
1765 .inter_digestsize = SM3_DIGEST_SIZE,
1766 .min_hw_rev = CC_HW_REV_713,
1767 .std_body = CC_STD_OSCCA,
1768 },
1769 {
1770 .mac_name = "xcbc(aes)",
1771 .mac_driver_name = "xcbc-aes-ccree",
1772 .blocksize = AES_BLOCK_SIZE,
1773 .is_mac = true,
1774 .template_ahash = {
1775 .init = cc_hash_init,
1776 .update = cc_mac_update,
1777 .final = cc_mac_final,
1778 .finup = cc_mac_finup,
1779 .digest = cc_mac_digest,
1780 .setkey = cc_xcbc_setkey,
1781 .export = cc_hash_export,
1782 .import = cc_hash_import,
1783 .halg = {
1784 .digestsize = AES_BLOCK_SIZE,
1785 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1786 },
1787 },
1788 .hash_mode = DRV_HASH_NULL,
1789 .hw_mode = DRV_CIPHER_XCBC_MAC,
1790 .inter_digestsize = AES_BLOCK_SIZE,
1791 .min_hw_rev = CC_HW_REV_630,
1792 .std_body = CC_STD_NIST,
1793 },
1794 {
1795 .mac_name = "cmac(aes)",
1796 .mac_driver_name = "cmac-aes-ccree",
1797 .blocksize = AES_BLOCK_SIZE,
1798 .is_mac = true,
1799 .template_ahash = {
1800 .init = cc_hash_init,
1801 .update = cc_mac_update,
1802 .final = cc_mac_final,
1803 .finup = cc_mac_finup,
1804 .digest = cc_mac_digest,
1805 .setkey = cc_cmac_setkey,
1806 .export = cc_hash_export,
1807 .import = cc_hash_import,
1808 .halg = {
1809 .digestsize = AES_BLOCK_SIZE,
1810 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1811 },
1812 },
1813 .hash_mode = DRV_HASH_NULL,
1814 .hw_mode = DRV_CIPHER_CMAC,
1815 .inter_digestsize = AES_BLOCK_SIZE,
1816 .min_hw_rev = CC_HW_REV_630,
1817 .std_body = CC_STD_NIST,
1818 },
1819 };
1820
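/*
 * Build a cc_hash_alg instance from a driver template. When @keyed is true
 * the HMAC/MAC names are used; otherwise the plain hash names are used and
 * the setkey callback is cleared.
 */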
1821 static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1822 struct device *dev, bool keyed)
1823 {
1824 struct cc_hash_alg *t_crypto_alg;
1825 struct crypto_alg *alg;
1826 struct ahash_alg *halg;
1827
1828 t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
1829 if (!t_crypto_alg)
1830 return ERR_PTR(-ENOMEM);
1831
1832 t_crypto_alg->ahash_alg = template->template_ahash;
1833 halg = &t_crypto_alg->ahash_alg;
1834 alg = &halg->halg.base;
1835
1836 if (keyed) {
1837 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1838 template->mac_name);
1839 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1840 template->mac_driver_name);
1841 } else {
1842 halg->setkey = NULL;
1843 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1844 template->name);
1845 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1846 template->driver_name);
1847 }
1848 alg->cra_module = THIS_MODULE;
1849 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1850 alg->cra_priority = CC_CRA_PRIO;
1851 alg->cra_blocksize = template->blocksize;
1852 alg->cra_alignmask = 0;
1853 alg->cra_exit = cc_cra_exit;
1854
1855 alg->cra_init = cc_cra_init;
1856 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1857
1858 t_crypto_alg->hash_mode = template->hash_mode;
1859 t_crypto_alg->hw_mode = template->hw_mode;
1860 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1861
1862 return t_crypto_alg;
1863 }
1864
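/*
 * Copy a constant array into device SRAM at *sram_buff_ofs using a
 * descriptor sequence, then advance the offset by the copied size.
 */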
1865 static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
1866 unsigned int size, u32 *sram_buff_ofs)
1867 {
1868 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1869 unsigned int larval_seq_len = 0;
1870 int rc;
1871
1872 cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
1873 larval_seq, &larval_seq_len);
1874 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1875 if (rc)
1876 return rc;
1877
1878 *sram_buff_ofs += size;
1879 return 0;
1880 }
1881
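/*
 * Populate device SRAM with the digest-length constants and the initial
 * (larval) digests for every hash mode this hardware revision supports.
 */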
1882 int cc_init_hash_sram(struct cc_drvdata *drvdata)
1883 {
1884 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1885 u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
1886 bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1887 bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1888 int rc = 0;
1889
1890
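/* Copy the digest-length init constant shared by MD5/SHA-1/SHA-224/SHA-256 */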
1891 rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
1892 sizeof(cc_digest_len_init), &sram_buff_ofs);
1893 if (rc)
1894 goto init_digest_const_err;
1895
1896 if (large_sha_supported) {
1897
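/* Copy the digest-length init constant used for SHA-384/SHA-512 */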
1898 rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
1899 sizeof(cc_digest_len_sha512_init),
1900 &sram_buff_ofs);
1901 if (rc)
1902 goto init_digest_const_err;
1903 }
1904
1905
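/* Remember where the initial (larval) digests start in SRAM */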
1906 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1907
1908
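/*
 * Copy the initial digests in a fixed order: MD5, SHA-1, SHA-224, SHA-256,
 * then SM3 (if supported) and SHA-384/SHA-512 (if supported). The address
 * arithmetic in cc_larval_digest_addr() relies on this exact layout.
 */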
1909 rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
1910 &sram_buff_ofs);
1911 if (rc)
1912 goto init_digest_const_err;
1913
1914 rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
1915 &sram_buff_ofs);
1916 if (rc)
1917 goto init_digest_const_err;
1918
1919 rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
1920 &sram_buff_ofs);
1921 if (rc)
1922 goto init_digest_const_err;
1923
1924 rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
1925 &sram_buff_ofs);
1926 if (rc)
1927 goto init_digest_const_err;
1928
1929 if (sm3_supported) {
1930 rc = cc_init_copy_sram(drvdata, cc_sm3_init,
1931 sizeof(cc_sm3_init), &sram_buff_ofs);
1932 if (rc)
1933 goto init_digest_const_err;
1934 }
1935
1936 if (large_sha_supported) {
1937 rc = cc_init_copy_sram(drvdata, cc_sha384_init,
1938 sizeof(cc_sha384_init), &sram_buff_ofs);
1939 if (rc)
1940 goto init_digest_const_err;
1941
1942 rc = cc_init_copy_sram(drvdata, cc_sha512_init,
1943 sizeof(cc_sha512_init), &sram_buff_ofs);
1944 if (rc)
1945 goto init_digest_const_err;
1946 }
1947
1948 init_digest_const_err:
1949 return rc;
1950 }
1951
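/*
 * Allocate the hash handle, reserve and populate the SRAM area holding the
 * hash constants, and register every hash/MAC algorithm supported by this
 * hardware revision and the enabled standards bodies.
 */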
1952 int cc_hash_alloc(struct cc_drvdata *drvdata)
1953 {
1954 struct cc_hash_handle *hash_handle;
1955 u32 sram_buff;
1956 u32 sram_size_to_alloc;
1957 struct device *dev = drvdata_to_dev(drvdata);
1958 int rc = 0;
1959 int alg;
1960
1961 hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
1962 if (!hash_handle)
1963 return -ENOMEM;
1964
1965 INIT_LIST_HEAD(&hash_handle->hash_list);
1966 drvdata->hash_handle = hash_handle;
1967
1968 sram_size_to_alloc = sizeof(cc_digest_len_init) +
1969 sizeof(cc_md5_init) +
1970 sizeof(cc_sha1_init) +
1971 sizeof(cc_sha224_init) +
1972 sizeof(cc_sha256_init);
1973
1974 if (drvdata->hw_rev >= CC_HW_REV_713)
1975 sram_size_to_alloc += sizeof(cc_sm3_init);
1976
1977 if (drvdata->hw_rev >= CC_HW_REV_712)
1978 sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
1979 sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
1980
1981 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1982 if (sram_buff == NULL_SRAM_ADDR) {
1983 rc = -ENOMEM;
1984 goto fail;
1985 }
1986
1987
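/* The digest-length constants sit at the start of the SRAM allocation */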
1988 hash_handle->digest_len_sram_addr = sram_buff;
1989
1990
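/* Populate the SRAM constants before registering the algorithms that use them */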
1991 rc = cc_init_hash_sram(drvdata);
1992 if (rc) {
1993 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1994 goto fail;
1995 }
1996
1997
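/* Register an ahash (and, where applicable, an HMAC/MAC) for each template */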
1998 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1999 struct cc_hash_alg *t_alg;
2000 int hw_mode = driver_hash[alg].hw_mode;
2001
2002
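/* Skip templates the hardware revision or enabled standards body cannot serve */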
2003 if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2004 !(drvdata->std_bodies & driver_hash[alg].std_body))
2005 continue;
2006
2007 if (driver_hash[alg].is_mac) {
2008
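/* Register the keyed (HMAC/XCBC/CMAC) variant */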
2009 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2010 if (IS_ERR(t_alg)) {
2011 rc = PTR_ERR(t_alg);
2012 dev_err(dev, "%s alg allocation failed\n",
2013 driver_hash[alg].driver_name);
2014 goto fail;
2015 }
2016 t_alg->drvdata = drvdata;
2017
2018 rc = crypto_register_ahash(&t_alg->ahash_alg);
2019 if (rc) {
2020 dev_err(dev, "%s alg registration failed\n",
2021 driver_hash[alg].driver_name);
2022 goto fail;
2023 }
2024
2025 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2026 }
2027 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2028 hw_mode == DRV_CIPHER_CMAC)
2029 continue;
2030
2031
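/* Register the plain (unkeyed) hash variant */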
2032 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2033 if (IS_ERR(t_alg)) {
2034 rc = PTR_ERR(t_alg);
2035 dev_err(dev, "%s alg allocation failed\n",
2036 driver_hash[alg].driver_name);
2037 goto fail;
2038 }
2039 t_alg->drvdata = drvdata;
2040
2041 rc = crypto_register_ahash(&t_alg->ahash_alg);
2042 if (rc) {
2043 dev_err(dev, "%s alg registration failed\n",
2044 driver_hash[alg].driver_name);
2045 goto fail;
2046 }
2047
2048 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2049 }
2050
2051 return 0;
2052
2053 fail:
2054 cc_hash_free(drvdata);
2055 return rc;
2056 }
2057
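/* Unregister and drop every algorithm that was registered by cc_hash_alloc() */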
2058 int cc_hash_free(struct cc_drvdata *drvdata)
2059 {
2060 struct cc_hash_alg *t_hash_alg, *hash_n;
2061 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2062
2063 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
2064 entry) {
2065 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2066 list_del(&t_hash_alg->entry);
2067 }
2068
2069 return 0;
2070 }
2071
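/*
 * Load the XCBC-MAC derived keys (K1/K2/K3) and the current MAC state into
 * the AES engine ahead of data processing.
 */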
2072 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2073 unsigned int *seq_size)
2074 {
2075 unsigned int idx = *seq_size;
2076 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2077 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2078 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2079
2080
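/* Load K1 (from the opad_tmp_keys buffer) as the AES key */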
2081 hw_desc_init(&desc[idx]);
2082 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2083 XCBC_MAC_K1_OFFSET),
2084 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2085 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2086 set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2087 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2088 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2089 set_flow_mode(&desc[idx], S_DIN_to_AES);
2090 idx++;
2091
2092
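/* Load K2 into the STATE1 register */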
2093 hw_desc_init(&desc[idx]);
2094 set_din_type(&desc[idx], DMA_DLLI,
2095 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2096 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2097 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2098 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2099 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2100 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2101 set_flow_mode(&desc[idx], S_DIN_to_AES);
2102 idx++;
2103
2104
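/* Load K3 into the STATE2 register */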
2105 hw_desc_init(&desc[idx]);
2106 set_din_type(&desc[idx], DMA_DLLI,
2107 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2108 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2109 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2110 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2111 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2112 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2113 set_flow_mode(&desc[idx], S_DIN_to_AES);
2114 idx++;
2115
2116
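/* Load the current MAC state (digest buffer) into STATE0 */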
2117 hw_desc_init(&desc[idx]);
2118 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2119 CC_AES_BLOCK_SIZE, NS_BIT);
2120 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2121 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2122 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2123 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2124 set_flow_mode(&desc[idx], S_DIN_to_AES);
2125 idx++;
2126 *seq_size = idx;
2127 }
2128
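/* Load the CMAC key and the current MAC state into the AES engine */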
2129 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2130 unsigned int *seq_size)
2131 {
2132 unsigned int idx = *seq_size;
2133 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2134 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2135 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2136
2137
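/*
 * Load the AES key. A 24-byte (AES-192) key is loaded as a full 32-byte
 * buffer; the key buffer is presumably zero-padded when the key is set.
 */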
2138 hw_desc_init(&desc[idx]);
2139 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2140 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2141 ctx->key_params.keylen), NS_BIT);
2142 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2143 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2144 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2145 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2146 set_flow_mode(&desc[idx], S_DIN_to_AES);
2147 idx++;
2148
2149
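/* Load the running MAC state from the digest buffer into STATE0 */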
2150 hw_desc_init(&desc[idx]);
2151 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2152 CC_AES_BLOCK_SIZE, NS_BIT);
2153 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2154 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2155 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2156 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2157 set_flow_mode(&desc[idx], S_DIN_to_AES);
2158 idx++;
2159 *seq_size = idx;
2160 }
2161
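/*
 * Add the data-processing descriptors for the current request chunk, using
 * either a direct DLLI entry or an MLLI table staged through SRAM.
 */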
2162 static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2163 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2164 struct cc_hw_desc desc[], bool is_not_last_data,
2165 unsigned int *seq_size)
2166 {
2167 unsigned int idx = *seq_size;
2168 struct device *dev = drvdata_to_dev(ctx->drvdata);
2169
2170 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2171 hw_desc_init(&desc[idx]);
2172 set_din_type(&desc[idx], DMA_DLLI,
2173 sg_dma_address(areq_ctx->curr_sg),
2174 areq_ctx->curr_sg->length, NS_BIT);
2175 set_flow_mode(&desc[idx], flow_mode);
2176 idx++;
2177 } else {
2178 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2179 dev_dbg(dev, " NULL mode\n");
2180
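/* No data to process for this request */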
2181 return;
2182 }
2183
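/* Copy the MLLI table into SRAM so the engine can walk it */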
2184 hw_desc_init(&desc[idx]);
2185 set_din_type(&desc[idx], DMA_DLLI,
2186 areq_ctx->mlli_params.mlli_dma_addr,
2187 areq_ctx->mlli_params.mlli_len, NS_BIT);
2188 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2189 areq_ctx->mlli_params.mlli_len);
2190 set_flow_mode(&desc[idx], BYPASS);
2191 idx++;
2192
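/* Process the data described by the MLLI table */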
2193 hw_desc_init(&desc[idx]);
2194 set_din_type(&desc[idx], DMA_MLLI,
2195 ctx->drvdata->mlli_sram_addr,
2196 areq_ctx->mlli_nents, NS_BIT);
2197 set_flow_mode(&desc[idx], flow_mode);
2198 idx++;
2199 }
2200 if (is_not_last_data)
2201 set_din_not_last_indication(&desc[(idx - 1)]);
2202
2203 *seq_size = idx;
2204 }
2205
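/* Return the host copy of the initial (larval) digest for the given mode */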
2206 static const void *cc_larval_digest(struct device *dev, u32 mode)
2207 {
2208 switch (mode) {
2209 case DRV_HASH_MD5:
2210 return cc_md5_init;
2211 case DRV_HASH_SHA1:
2212 return cc_sha1_init;
2213 case DRV_HASH_SHA224:
2214 return cc_sha224_init;
2215 case DRV_HASH_SHA256:
2216 return cc_sha256_init;
2217 case DRV_HASH_SHA384:
2218 return cc_sha384_init;
2219 case DRV_HASH_SHA512:
2220 return cc_sha512_init;
2221 case DRV_HASH_SM3:
2222 return cc_sm3_init;
2223 default:
2224 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2225 return cc_md5_init;
2226 }
2227 }
2228
2238
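/*
 * cc_larval_digest_addr() - return the SRAM address of the initial digest
 * for @mode, based on the fixed layout written by cc_init_hash_sram():
 *
 *   MD5 | SHA-1 | SHA-224 | SHA-256 | [SM3] | [SHA-384 | SHA-512]
 *
 * SM3 is only present on hardware revision 713 and later, which is why the
 * SHA-384/SHA-512 offsets are adjusted when SM3 is supported.
 */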
2239 u32 cc_larval_digest_addr(void *drvdata, u32 mode)
2240 {
2241 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2242 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2243 struct device *dev = drvdata_to_dev(_drvdata);
2244 bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2245 u32 addr;
2246
2247 switch (mode) {
2248 case DRV_HASH_NULL:
2249 break;
2250 case DRV_HASH_MD5:
2251 return (hash_handle->larval_digest_sram_addr);
2252 case DRV_HASH_SHA1:
2253 return (hash_handle->larval_digest_sram_addr +
2254 sizeof(cc_md5_init));
2255 case DRV_HASH_SHA224:
2256 return (hash_handle->larval_digest_sram_addr +
2257 sizeof(cc_md5_init) +
2258 sizeof(cc_sha1_init));
2259 case DRV_HASH_SHA256:
2260 return (hash_handle->larval_digest_sram_addr +
2261 sizeof(cc_md5_init) +
2262 sizeof(cc_sha1_init) +
2263 sizeof(cc_sha224_init));
2264 case DRV_HASH_SM3:
2265 return (hash_handle->larval_digest_sram_addr +
2266 sizeof(cc_md5_init) +
2267 sizeof(cc_sha1_init) +
2268 sizeof(cc_sha224_init) +
2269 sizeof(cc_sha256_init));
2270 case DRV_HASH_SHA384:
2271 addr = (hash_handle->larval_digest_sram_addr +
2272 sizeof(cc_md5_init) +
2273 sizeof(cc_sha1_init) +
2274 sizeof(cc_sha224_init) +
2275 sizeof(cc_sha256_init));
2276 if (sm3_supported)
2277 addr += sizeof(cc_sm3_init);
2278 return addr;
2279 case DRV_HASH_SHA512:
2280 addr = (hash_handle->larval_digest_sram_addr +
2281 sizeof(cc_md5_init) +
2282 sizeof(cc_sha1_init) +
2283 sizeof(cc_sha224_init) +
2284 sizeof(cc_sha256_init) +
2285 sizeof(cc_sha384_init));
2286 if (sm3_supported)
2287 addr += sizeof(cc_sm3_init);
2288 return addr;
2289 default:
2290 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2291 }
2292
2293
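/* Invalid or NULL mode: fall back to the base (MD5) larval digest address */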
2294 return hash_handle->larval_digest_sram_addr;
2295 }
2296
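/*
 * Return the SRAM address of the digest-length init constant: the
 * SHA-384/SHA-512 variant directly follows the common one written first by
 * cc_init_hash_sram().
 */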
2297 u32 cc_digest_len_addr(void *drvdata, u32 mode)
2298 {
2299 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2300 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2301 u32 digest_len_addr = hash_handle->digest_len_sram_addr;
2302
2303 switch (mode) {
2304 case DRV_HASH_SHA1:
2305 case DRV_HASH_SHA224:
2306 case DRV_HASH_SHA256:
2307 case DRV_HASH_MD5:
2308 return digest_len_addr;
2309 case DRV_HASH_SHA384:
2310 case DRV_HASH_SHA512:
2311 return digest_len_addr + sizeof(cc_digest_len_init);
2312 default:
2313 return digest_len_addr;
2314 }
2315 }