// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
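
/*
 * For illustration only (not part of the original driver): a job descriptor
 * with the layout above is what the descriptor-construction helpers used
 * throughout this file produce, e.g.:
 *
 *    init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                         HDR_SHARE_DEFER | HDR_REVERSE);
 *    append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *    append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * ahash_edesc_alloc() below emits the header and ShareDesc pointer; the
 * per-operation code appends the SEQ_OUT_PTR/SEQ_IN_PTR commands.
 */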

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>

#define CAAM_CRA_PRIORITY       3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE      (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE    SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE   SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES    (DESC_AHASH_FINAL_LEN + \
                     CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN      (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN            8
#define MAX_CTX_LEN         (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
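
/*
 * Worked example (for orientation only): the context the hardware saves and
 * restores is the running digest plus the 8-byte message-length counter, so
 * e.g. SHA-256 uses ctx_len = 8 + 32 = 40 bytes, and MAX_CTX_LEN covers the
 * largest case, SHA-512: 8 + 64 = 72 bytes.
 */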

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
    struct crypto_engine_ctx enginectx;
    u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
    u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
    u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
    u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
    u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
    dma_addr_t sh_desc_update_dma ____cacheline_aligned;
    dma_addr_t sh_desc_update_first_dma;
    dma_addr_t sh_desc_fin_dma;
    dma_addr_t sh_desc_digest_dma;
    enum dma_data_direction dir;
    enum dma_data_direction key_dir;
    struct device *jrdev;
    int ctx_len;
    struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
    dma_addr_t buf_dma;
    dma_addr_t ctx_dma;
    int ctx_dma_len;
    u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
    int buflen;
    int next_buflen;
    u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
    int (*update)(struct ahash_request *req) ____cacheline_aligned;
    int (*final)(struct ahash_request *req);
    int (*finup)(struct ahash_request *req);
    struct ahash_edesc *edesc;
    void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
                  void *context);
};

struct caam_export_state {
    u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
    u8 caam_ctx[MAX_CTX_LEN];
    int buflen;
    int (*update)(struct ahash_request *req);
    int (*final)(struct ahash_request *req);
    int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
    return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
           (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
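
/*
 * Note on the check above: adata.algtype packs the algorithm selector and
 * the additional algorithm information (AAI) into a single word, which is
 * why both fields are masked before comparing against
 * OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC.
 */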

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                      struct caam_hash_state *state,
                      int ctx_len)
{
    state->ctx_dma_len = ctx_len;
    state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                    ctx_len, DMA_FROM_DEVICE);
    if (dma_mapping_error(jrdev, state->ctx_dma)) {
        dev_err(jrdev, "unable to map ctx\n");
        state->ctx_dma = 0;
        return -ENOMEM;
    }

    append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

    return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
                     struct sec4_sg_entry *sec4_sg,
                     struct caam_hash_state *state)
{
    int buflen = state->buflen;

    if (!buflen)
        return 0;

    state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
                    DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, state->buf_dma)) {
        dev_err(jrdev, "unable to map buf\n");
        state->buf_dma = 0;
        return -ENOMEM;
    }

    dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

    return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
                     struct caam_hash_state *state, int ctx_len,
                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
    state->ctx_dma_len = ctx_len;
    state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
    if (dma_mapping_error(jrdev, state->ctx_dma)) {
        dev_err(jrdev, "unable to map ctx\n");
        state->ctx_dma = 0;
        return -ENOMEM;
    }

    dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

    return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct device *jrdev = ctx->jrdev;
    struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
    u32 *desc;

    ctx->adata.key_virt = ctx->key;

    /* ahash_update shared descriptor */
    desc = ctx->sh_desc_update;
    cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
              ctx->ctx_len, true, ctrlpriv->era);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                   desc_bytes(desc), ctx->dir);

    print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    /* ahash_update_first shared descriptor */
    desc = ctx->sh_desc_update_first;
    cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
              ctx->ctx_len, false, ctrlpriv->era);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
                 ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* ahash_final shared descriptor */
    desc = ctx->sh_desc_fin;
    cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
              ctx->ctx_len, true, ctrlpriv->era);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                   desc_bytes(desc), ctx->dir);

    print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* ahash_digest shared descriptor */
    desc = ctx->sh_desc_digest;
    cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
              ctx->ctx_len, false, ctrlpriv->era);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                   desc_bytes(desc), ctx->dir);

    print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct device *jrdev = ctx->jrdev;
    u32 *desc;

    /* shared descriptor for ahash_update */
    desc = ctx->sh_desc_update;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                ctx->ctx_len, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    /* shared descriptor for ahash_{final,finup} */
    desc = ctx->sh_desc_fin;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                digestsize, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    /* key is immediate data for INIT and INITFINAL states */
    ctx->adata.key_virt = ctx->key;

    /* shared descriptor for first invocation of ahash_update */
    desc = ctx->sh_desc_update_first;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
                 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* shared descriptor for ahash_digest */
    desc = ctx->sh_desc_digest;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                digestsize, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);
    return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct device *jrdev = ctx->jrdev;
    u32 *desc;

    /* shared descriptor for ahash_update */
    desc = ctx->sh_desc_update;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                ctx->ctx_len, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* shared descriptor for ahash_{final,finup} */
    desc = ctx->sh_desc_fin;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                digestsize, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* shared descriptor for first invocation of ahash_update */
    desc = ctx->sh_desc_update_first;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
                 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    /* shared descriptor for ahash_digest */
    desc = ctx->sh_desc_digest;
    cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                digestsize, ctx->ctx_len);
    dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                   desc_bytes(desc), ctx->dir);
    print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc,
                 desc_bytes(desc), 1);

    return 0;
}

/* Digest the key if it is too large (i.e. longer than the block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
               u32 digestsize)
{
    struct device *jrdev = ctx->jrdev;
    u32 *desc;
    struct split_key_result result;
    dma_addr_t key_dma;
    int ret;

    desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
    if (!desc) {
        dev_err(jrdev, "unable to allocate key input memory\n");
        return -ENOMEM;
    }

    init_job_desc(desc, 0);

    key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(jrdev, key_dma)) {
        dev_err(jrdev, "unable to map key memory\n");
        kfree(desc);
        return -ENOMEM;
    }

    /* Job descriptor to perform unkeyed hash on key_in */
    append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
             OP_ALG_AS_INITFINAL);
    append_seq_in_ptr(desc, key_dma, *keylen, 0);
    append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
    append_seq_out_ptr(desc, key_dma, digestsize, 0);
    append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
             LDST_SRCDST_BYTE_CONTEXT);

    print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
    print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    result.err = 0;
    init_completion(&result.completion);

    ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
    if (ret == -EINPROGRESS) {
        /* in progress */
        wait_for_completion(&result.completion);
        ret = result.err;

        print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, key,
                     digestsize, 1);
    }
    dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

    *keylen = digestsize;

    kfree(desc);

    return ret;
}
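
/*
 * This mirrors the usual HMAC key-preprocessing rule: a key longer than the
 * block size is replaced by its digest. For example (hypothetical values),
 * a 100-byte hmac(sha256) key is reduced to its 32-byte SHA-256 digest
 * before the split key is generated; see ahash_setkey() below, which calls
 * this helper whenever keylen > blocksize.
 */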

static int ahash_setkey(struct crypto_ahash *ahash,
            const u8 *key, unsigned int keylen)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct device *jrdev = ctx->jrdev;
    int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
    int ret;
    u8 *hashed_key = NULL;

    dev_dbg(jrdev, "keylen %d\n", keylen);

    if (keylen > blocksize) {
        hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
        if (!hashed_key)
            return -ENOMEM;
        ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
        if (ret)
            goto bad_free_key;
        key = hashed_key;
    }

    /*
     * If DKP is supported, use it in the shared descriptor to generate
     * the split key.
     */
    if (ctrlpriv->era >= 6) {
        ctx->adata.key_inline = true;
        ctx->adata.keylen = keylen;
        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                              OP_ALG_ALGSEL_MASK);

        if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
            goto bad_free_key;

        memcpy(ctx->key, key, keylen);

        /*
         * In case |user key| > |derived key|, using DKP<imm,imm>
         * would result in invalid opcodes (last bytes of user key) in
         * the resulting descriptor. Use DKP<ptr,imm> instead => both
         * virtual and dma key addresses are needed.
         */
        if (keylen > ctx->adata.keylen_pad)
            dma_sync_single_for_device(ctx->jrdev,
                           ctx->adata.key_dma,
                           ctx->adata.keylen_pad,
                           DMA_TO_DEVICE);
    } else {
        ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                    keylen, CAAM_MAX_HASH_KEY_SIZE);
        if (ret)
            goto bad_free_key;
    }

    kfree(hashed_key);
    return ahash_set_sh_desc(ahash);
 bad_free_key:
    kfree(hashed_key);
    return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
            unsigned int keylen)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct device *jrdev = ctx->jrdev;

    if (keylen != AES_KEYSIZE_128)
        return -EINVAL;

    memcpy(ctx->key, key, keylen);
    dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
                   DMA_TO_DEVICE);
    ctx->adata.keylen = keylen;

    print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

    return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
            unsigned int keylen)
{
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int err;

    err = aes_check_keylen(keylen);
    if (err)
        return err;

    /* key is immediate data for all cmac shared descriptors */
    ctx->adata.key_virt = key;
    ctx->adata.keylen = keylen;

    print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
                 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

    return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
    dma_addr_t sec4_sg_dma;
    int src_nents;
    int sec4_sg_bytes;
    bool bklog;
    u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
    struct sec4_sg_entry sec4_sg[];
};
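
/*
 * A minimal sketch (assuming a 4-entry link table) of how the trailing
 * flexible array member lets one allocation hold both the h/w job
 * descriptor and its link table:
 *
 *    edesc = kzalloc(sizeof(*edesc) + 4 * sizeof(struct sec4_sg_entry),
 *                    GFP_DMA | flags);
 *
 * This is exactly the arithmetic done in ahash_edesc_alloc() below, with
 * sg_num entries instead of 4.
 */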

static inline void ahash_unmap(struct device *dev,
            struct ahash_edesc *edesc,
            struct ahash_request *req, int dst_len)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    if (edesc->src_nents)
        dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

    if (edesc->sec4_sg_bytes)
        dma_unmap_single(dev, edesc->sec4_sg_dma,
                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

    if (state->buf_dma) {
        dma_unmap_single(dev, state->buf_dma, state->buflen,
                 DMA_TO_DEVICE);
        state->buf_dma = 0;
    }
}

static inline void ahash_unmap_ctx(struct device *dev,
            struct ahash_edesc *edesc,
            struct ahash_request *req, int dst_len, u32 flag)
{
    struct caam_hash_state *state = ahash_request_ctx(req);

    if (state->ctx_dma) {
        dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
        state->ctx_dma = 0;
    }
    ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
                  void *context, enum dma_data_direction dir)
{
    struct ahash_request *req = context;
    struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    int digestsize = crypto_ahash_digestsize(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    int ecode = 0;
    bool has_bklog;

    dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

    edesc = state->edesc;
    has_bklog = edesc->bklog;

    if (err)
        ecode = caam_jr_strstatus(jrdev, err);

    ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
    memcpy(req->result, state->caam_ctx, digestsize);
    kfree(edesc);

    print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                 ctx->ctx_len, 1);

    /*
     * If the backlog flag is not set, completion of the request is
     * done by CAAM, not by the crypto engine.
     */
    if (!has_bklog)
        req->base.complete(&req->base, ecode);
    else
        crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
               void *context)
{
    ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                   void *context)
{
    ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
                     void *context, enum dma_data_direction dir)
{
    struct ahash_request *req = context;
    struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
    struct ahash_edesc *edesc;
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    int digestsize = crypto_ahash_digestsize(ahash);
    int ecode = 0;
    bool has_bklog;

    dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

    edesc = state->edesc;
    has_bklog = edesc->bklog;
    if (err)
        ecode = caam_jr_strstatus(jrdev, err);

    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
    kfree(edesc);

    scatterwalk_map_and_copy(state->buf, req->src,
                 req->nbytes - state->next_buflen,
                 state->next_buflen, 0);
    state->buflen = state->next_buflen;

    print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
                 state->buflen, 1);

    print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                 ctx->ctx_len, 1);
    if (req->result)
        print_hex_dump_debug("result@"__stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                     digestsize, 1);

    /*
     * If the backlog flag is not set, completion of the request is
     * done by CAAM, not by the crypto engine.
     */
    if (!has_bklog)
        req->base.complete(&req->base, ecode);
    else
        crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
              void *context)
{
    ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                   void *context)
{
    ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an extended descriptor, which holds the hardware job descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                         int sg_num, u32 *sh_desc,
                         dma_addr_t sh_desc_dma)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
               GFP_KERNEL : GFP_ATOMIC;
    struct ahash_edesc *edesc;
    unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

    edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
    if (!edesc) {
        dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
        return NULL;
    }

    state->edesc = edesc;

    init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                 HDR_SHARE_DEFER | HDR_REVERSE);

    return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                   struct ahash_edesc *edesc,
                   struct ahash_request *req, int nents,
                   unsigned int first_sg,
                   unsigned int first_bytes, size_t to_hash)
{
    dma_addr_t src_dma;
    u32 options;

    if (nents > 1 || first_sg) {
        struct sec4_sg_entry *sg = edesc->sec4_sg;
        unsigned int sgsize = sizeof(*sg) *
                      pad_sg_nents(first_sg + nents);

        sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

        src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->jrdev, src_dma)) {
            dev_err(ctx->jrdev, "unable to map S/G table\n");
            return -ENOMEM;
        }

        edesc->sec4_sg_bytes = sgsize;
        edesc->sec4_sg_dma = src_dma;
        options = LDST_SGF;
    } else {
        src_dma = sg_dma_address(req->src);
        options = 0;
    }

    append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
              options);

    return 0;
}
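
/*
 * Illustration of the two cases above: a single mapped segment with nothing
 * prepended is referenced directly, while anything else goes through a sec4
 * link table, with LDST_SGF marking the pointer as scatter/gather:
 *
 *    append_seq_in_ptr(desc, sg_dma_address(req->src), len, 0);
 *    append_seq_in_ptr(desc, edesc->sec4_sg_dma, len, LDST_SGF);
 */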

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
    struct ahash_request *req = ahash_request_cast(areq);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    u32 *desc = state->edesc->hw_desc;
    int ret;

    state->edesc->bklog = true;

    ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

    if (ret == -ENOSPC && engine->retry_support)
        return ret;

    if (ret != -EINPROGRESS) {
        ahash_unmap(jrdev, state->edesc, req, 0);
        kfree(state->edesc);
    } else {
        ret = 0;
    }

    return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
                 void (*cbk)(struct device *jrdev, u32 *desc,
                     u32 err, void *context),
                 struct ahash_request *req,
                 int dst_len, enum dma_data_direction dir)
{
    struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct ahash_edesc *edesc = state->edesc;
    u32 *desc = edesc->hw_desc;
    int ret;

    state->ahash_op_done = cbk;

    /*
     * Only backlog requests are sent to the crypto engine, since the
     * others can be handled by CAAM, if free, especially since the JR has
     * up to 1024 entries (more than the 10 entries of the crypto engine).
     */
    if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
        ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
                                 req);
    else
        ret = caam_jr_enqueue(jrdev, desc, cbk, req);

    if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
        ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
        kfree(edesc);
    }

    return ret;
}
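
/*
 * Sketch of the submission flow (for orientation): requests that may
 * backlog are routed through the crypto engine, which later calls
 * ahash_do_one_req(); all other requests go straight to the job ring:
 *
 *    MAY_BACKLOG: crypto_transfer_hash_request_to_engine() ->
 *                 ahash_do_one_req() -> caam_jr_enqueue()
 *    otherwise:   caam_jr_enqueue(jrdev, desc, cbk, req)
 */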

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    u8 *buf = state->buf;
    int *buflen = &state->buflen;
    int *next_buflen = &state->next_buflen;
    int blocksize = crypto_ahash_blocksize(ahash);
    int in_len = *buflen + req->nbytes, to_hash;
    u32 *desc;
    int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
    struct ahash_edesc *edesc;
    int ret = 0;

    *next_buflen = in_len & (blocksize - 1);
    to_hash = in_len - *next_buflen;

    /*
     * For XCBC and CMAC, if to_hash is a multiple of the block size,
     * keep the last block in the internal buffer
     */
    if ((is_xcbc_aes(ctx->adata.algtype) ||
         is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
         (*next_buflen == 0)) {
        *next_buflen = blocksize;
        to_hash -= blocksize;
    }
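
    /*
     * Worked example, assuming AES (blocksize 16), buflen 0 and a 32-byte
     * request: next_buflen would compute to 0, so the branch above holds
     * the last block back (next_buflen = 16, to_hash = 16); XCBC/CMAC
     * treat the final block specially at finalization, so it must stay
     * buffered.
     */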

    if (to_hash) {
        int pad_nents;
        int src_len = req->nbytes - *next_buflen;

        src_nents = sg_nents_for_len(req->src, src_len);
        if (src_nents < 0) {
            dev_err(jrdev, "Invalid number of src SG.\n");
            return src_nents;
        }

        if (src_nents) {
            mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                          DMA_TO_DEVICE);
            if (!mapped_nents) {
                dev_err(jrdev, "unable to DMA map source\n");
                return -ENOMEM;
            }
        } else {
            mapped_nents = 0;
        }

        sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
        pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
        sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

        /*
         * allocate space for base edesc and hw desc commands,
         * link tables
         */
        edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
                      ctx->sh_desc_update_dma);
        if (!edesc) {
            dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
            return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                     edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
            goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
            goto unmap_ctx;

        if (mapped_nents)
            sg_to_sec4_sg_last(req->src, src_len,
                       edesc->sec4_sg + sec4_sg_src_index,
                       0);
        else
            sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                        1);

        desc = edesc->hw_desc;

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                             sec4_sg_bytes,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            dev_err(jrdev, "unable to map S/G table\n");
            ret = -ENOMEM;
            goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                       to_hash, LDST_SGF);

        append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                     desc_bytes(desc), 1);

        ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
                    ctx->ctx_len, DMA_BIDIRECTIONAL);
    } else if (*next_buflen) {
        scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                     req->nbytes, 0);
        *buflen = *next_buflen;

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                     *buflen, 1);
    }

    return ret;
unmap_ctx:
    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
    kfree(edesc);
    return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    int buflen = state->buflen;
    u32 *desc;
    int sec4_sg_bytes;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret;

    sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
            sizeof(struct sec4_sg_entry);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
                  ctx->sh_desc_fin_dma);
    if (!edesc)
        return -ENOMEM;

    desc = edesc->hw_desc;

    edesc->sec4_sg_bytes = sec4_sg_bytes;

    ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
    if (ret)
        goto unmap_ctx;

    ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
    if (ret)
        goto unmap_ctx;

    sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
        dev_err(jrdev, "unable to map S/G table\n");
        ret = -ENOMEM;
        goto unmap_ctx;
    }

    append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
              LDST_SGF);
    append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

    print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
                 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
    ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
    kfree(edesc);
    return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    int buflen = state->buflen;
    u32 *desc;
    int sec4_sg_src_index;
    int src_nents, mapped_nents;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret;

    src_nents = sg_nents_for_len(req->src, req->nbytes);
    if (src_nents < 0) {
        dev_err(jrdev, "Invalid number of src SG.\n");
        return src_nents;
    }

    if (src_nents) {
        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                      DMA_TO_DEVICE);
        if (!mapped_nents) {
            dev_err(jrdev, "unable to DMA map source\n");
            return -ENOMEM;
        }
    } else {
        mapped_nents = 0;
    }

    sec4_sg_src_index = 1 + (buflen ? 1 : 0);

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
    if (!edesc) {
        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
        return -ENOMEM;
    }

    desc = edesc->hw_desc;

    edesc->src_nents = src_nents;

    ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
    if (ret)
        goto unmap_ctx;

    ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
    if (ret)
        goto unmap_ctx;

    ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                  sec4_sg_src_index, ctx->ctx_len + buflen,
                  req->nbytes);
    if (ret)
        goto unmap_ctx;

    append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

    print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
                 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
    ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
    kfree(edesc);
    return ret;
}

static int ahash_digest(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    u32 *desc;
    int digestsize = crypto_ahash_digestsize(ahash);
    int src_nents, mapped_nents;
    struct ahash_edesc *edesc;
    int ret;

    state->buf_dma = 0;

    src_nents = sg_nents_for_len(req->src, req->nbytes);
    if (src_nents < 0) {
        dev_err(jrdev, "Invalid number of src SG.\n");
        return src_nents;
    }

    if (src_nents) {
        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                      DMA_TO_DEVICE);
        if (!mapped_nents) {
            dev_err(jrdev, "unable to map source for DMA\n");
            return -ENOMEM;
        }
    } else {
        mapped_nents = 0;
    }

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
    if (!edesc) {
        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
        return -ENOMEM;
    }

    edesc->src_nents = src_nents;

    ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                  req->nbytes);
    if (ret) {
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return ret;
    }

    desc = edesc->hw_desc;

    ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
    if (ret) {
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
    }

    print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
                 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    u8 *buf = state->buf;
    int buflen = state->buflen;
    u32 *desc;
    int digestsize = crypto_ahash_digestsize(ahash);
    struct ahash_edesc *edesc;
    int ret;

    /* allocate space for base edesc and hw desc commands, link tables */
    edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
                  ctx->sh_desc_digest_dma);
    if (!edesc)
        return -ENOMEM;

    desc = edesc->hw_desc;

    if (buflen) {
        state->buf_dma = dma_map_single(jrdev, buf, buflen,
                        DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
            dev_err(jrdev, "unable to map src\n");
            goto unmap;
        }

        append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
    }

    ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
    if (ret)
        goto unmap;

    print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                 1);

    return ahash_enqueue_req(jrdev, ahash_done, req,
                 digestsize, DMA_FROM_DEVICE);
 unmap:
    ahash_unmap(jrdev, edesc, req, digestsize);
    kfree(edesc);
    return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
    struct caam_hash_state *state = ahash_request_ctx(req);
    struct device *jrdev = ctx->jrdev;
    u8 *buf = state->buf;
    int *buflen = &state->buflen;
    int *next_buflen = &state->next_buflen;
    int blocksize = crypto_ahash_blocksize(ahash);
    int in_len = *buflen + req->nbytes, to_hash;
    int sec4_sg_bytes, src_nents, mapped_nents;
    struct ahash_edesc *edesc;
    u32 *desc;
    int ret = 0;

    *next_buflen = in_len & (blocksize - 1);
    to_hash = in_len - *next_buflen;

    /*
     * For XCBC and CMAC, if to_hash is a multiple of the block size,
     * keep the last block in the internal buffer
     */
    if ((is_xcbc_aes(ctx->adata.algtype) ||
         is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
         (*next_buflen == 0)) {
        *next_buflen = blocksize;
        to_hash -= blocksize;
    }

    if (to_hash) {
        int pad_nents;
        int src_len = req->nbytes - *next_buflen;

        src_nents = sg_nents_for_len(req->src, src_len);
        if (src_nents < 0) {
            dev_err(jrdev, "Invalid number of src SG.\n");
            return src_nents;
        }

        if (src_nents) {
            mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                          DMA_TO_DEVICE);
            if (!mapped_nents) {
                dev_err(jrdev, "unable to DMA map source\n");
                return -ENOMEM;
            }
        } else {
            mapped_nents = 0;
        }

        pad_nents = pad_sg_nents(1 + mapped_nents);
        sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

        /*
         * allocate space for base edesc and hw desc commands,
         * link tables
         */
        edesc = ahash_edesc_alloc(req, pad_nents,
                      ctx->sh_desc_update_first,
                      ctx->sh_desc_update_first_dma);
        if (!edesc) {
            dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
            return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
        if (ret)
            goto unmap_ctx;

        sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

        desc = edesc->hw_desc;

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                            sec4_sg_bytes,
                            DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
            dev_err(jrdev, "unable to map S/G table\n");
            ret = -ENOMEM;
            goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
        if (ret)
            goto unmap_ctx;

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                     desc_bytes(desc), 1);

        ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
                    ctx->ctx_len, DMA_TO_DEVICE);
        if ((ret != -EINPROGRESS) && (ret != -EBUSY))
            return ret;
        state->update = ahash_update_ctx;
        state->finup = ahash_finup_ctx;
        state->final = ahash_final_ctx;
    } else if (*next_buflen) {
        scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                     req->nbytes, 0);
        *buflen = *next_buflen;

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                     *buflen, 1);
    }

    return ret;
 unmap_ctx:
    ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
    kfree(edesc);
    return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
1312 static int ahash_finup_no_ctx(struct ahash_request *req)
1313 {
1314     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1315     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1316     struct caam_hash_state *state = ahash_request_ctx(req);
1317     struct device *jrdev = ctx->jrdev;
1318     int buflen = state->buflen;
1319     u32 *desc;
1320     int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1321     int digestsize = crypto_ahash_digestsize(ahash);
1322     struct ahash_edesc *edesc;
1323     int ret;
1324 
1325     src_nents = sg_nents_for_len(req->src, req->nbytes);
1326     if (src_nents < 0) {
1327         dev_err(jrdev, "Invalid number of src SG.\n");
1328         return src_nents;
1329     }
1330 
1331     if (src_nents) {
1332         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1333                       DMA_TO_DEVICE);
1334         if (!mapped_nents) {
1335             dev_err(jrdev, "unable to DMA map source\n");
1336             return -ENOMEM;
1337         }
1338     } else {
1339         mapped_nents = 0;
1340     }
1341 
1342     sec4_sg_src_index = 2;
1343     sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1344              sizeof(struct sec4_sg_entry);
1345 
1346     /* allocate space for base edesc and hw desc commands, link tables */
1347     edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
1348                   ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
1349     if (!edesc) {
1350         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1351         return -ENOMEM;
1352     }
1353 
1354     desc = edesc->hw_desc;
1355 
1356     edesc->src_nents = src_nents;
1357     edesc->sec4_sg_bytes = sec4_sg_bytes;
1358 
1359     ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1360     if (ret)
1361         goto unmap;
1362 
1363     ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1364                   req->nbytes);
1365     if (ret) {
1366         dev_err(jrdev, "unable to map S/G table\n");
1367         goto unmap;
1368     }
1369 
1370     ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1371     if (ret)
1372         goto unmap;
1373 
1374     print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1375                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1376                  1);
1377 
1378     return ahash_enqueue_req(jrdev, ahash_done, req,
1379                  digestsize, DMA_FROM_DEVICE);
1380  unmap:
1381     ahash_unmap(jrdev, edesc, req, digestsize);
1382     kfree(edesc);
1383     return -ENOMEM;
1384 
1385 }
1386 
1387 /* submit first update job descriptor after init */
1388 static int ahash_update_first(struct ahash_request *req)
1389 {
1390     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1391     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1392     struct caam_hash_state *state = ahash_request_ctx(req);
1393     struct device *jrdev = ctx->jrdev;
1394     u8 *buf = state->buf;
1395     int *buflen = &state->buflen;
1396     int *next_buflen = &state->next_buflen;
1397     int to_hash;
1398     int blocksize = crypto_ahash_blocksize(ahash);
1399     u32 *desc;
1400     int src_nents, mapped_nents;
1401     struct ahash_edesc *edesc;
1402     int ret = 0;
1403 
1404     *next_buflen = req->nbytes & (blocksize - 1);
1405     to_hash = req->nbytes - *next_buflen;
1406 
1407     /*
1408      * For XCBC and CMAC, if to_hash is multiple of block size,
1409      * keep last block in internal buffer
1410      */
1411     if ((is_xcbc_aes(ctx->adata.algtype) ||
1412          is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1413          (*next_buflen == 0)) {
1414         *next_buflen = blocksize;
1415         to_hash -= blocksize;
1416     }
1417 
1418     if (to_hash) {
1419         src_nents = sg_nents_for_len(req->src,
1420                          req->nbytes - *next_buflen);
1421         if (src_nents < 0) {
1422             dev_err(jrdev, "Invalid number of src SG.\n");
1423             return src_nents;
1424         }
1425 
1426         if (src_nents) {
1427             mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1428                           DMA_TO_DEVICE);
1429             if (!mapped_nents) {
1430                 dev_err(jrdev, "unable to map source for DMA\n");
1431                 return -ENOMEM;
1432             }
1433         } else {
1434             mapped_nents = 0;
1435         }
1436 
1437         /*
1438          * allocate space for base edesc and hw desc commands,
1439          * link tables
1440          */
1441         edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
1442                       mapped_nents : 0,
1443                       ctx->sh_desc_update_first,
1444                       ctx->sh_desc_update_first_dma);
1445         if (!edesc) {
1446             dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1447             return -ENOMEM;
1448         }
1449 
1450         edesc->src_nents = src_nents;
1451 
1452         ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1453                       to_hash);
1454         if (ret)
1455             goto unmap_ctx;
1456 
1457         desc = edesc->hw_desc;
1458 
1459         ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1460         if (ret)
1461             goto unmap_ctx;
1462 
1463         print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1464                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
1465                      desc_bytes(desc), 1);
1466 
1467         ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
1468                     ctx->ctx_len, DMA_TO_DEVICE);
1469         if ((ret != -EINPROGRESS) && (ret != -EBUSY))
1470             return ret;
1471         state->update = ahash_update_ctx;
1472         state->finup = ahash_finup_ctx;
1473         state->final = ahash_final_ctx;
1474     } else if (*next_buflen) {
1475         state->update = ahash_update_no_ctx;
1476         state->finup = ahash_finup_no_ctx;
1477         state->final = ahash_final_no_ctx;
1478         scatterwalk_map_and_copy(buf, req->src, 0,
1479                      req->nbytes, 0);
1480         *buflen = *next_buflen;
1481 
1482         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1483                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
1484                      *buflen, 1);
1485     }
1486 
1487     return ret;
1488  unmap_ctx:
1489     ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1490     kfree(edesc);
1491     return ret;
1492 }
1493 
1494 static int ahash_finup_first(struct ahash_request *req)
1495 {
1496     return ahash_digest(req);
1497 }
1498 
1499 static int ahash_init(struct ahash_request *req)
1500 {
1501     struct caam_hash_state *state = ahash_request_ctx(req);
1502 
1503     state->update = ahash_update_first;
1504     state->finup = ahash_finup_first;
1505     state->final = ahash_final_no_ctx;
1506 
1507     state->ctx_dma = 0;
1508     state->ctx_dma_len = 0;
1509     state->buf_dma = 0;
1510     state->buflen = 0;
1511     state->next_buflen = 0;
1512 
1513     return 0;
1514 }
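/*
 * Illustrative call flow (not part of the driver): after ahash_init(), a
 * typical caller sequence dispatches through the per-request state machine
 * as
 *
 *	crypto_ahash_init(req)   -> ahash_init
 *	crypto_ahash_update(req) -> ahash_update -> ahash_update_first
 *	crypto_ahash_update(req) -> ahash_update -> ahash_update_ctx
 *	crypto_ahash_final(req)  -> ahash_final  -> ahash_final_ctx
 *
 * where ahash_update_first() rewires state->update/finup/final to the
 * *_ctx variants once the first job has produced a running context in
 * CAAM, and to the *_no_ctx variants while data is merely buffered.
 */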
1515 
1516 static int ahash_update(struct ahash_request *req)
1517 {
1518     struct caam_hash_state *state = ahash_request_ctx(req);
1519 
1520     return state->update(req);
1521 }
1522 
1523 static int ahash_finup(struct ahash_request *req)
1524 {
1525     struct caam_hash_state *state = ahash_request_ctx(req);
1526 
1527     return state->finup(req);
1528 }
1529 
1530 static int ahash_final(struct ahash_request *req)
1531 {
1532     struct caam_hash_state *state = ahash_request_ctx(req);
1533 
1534     return state->final(req);
1535 }
1536 
1537 static int ahash_export(struct ahash_request *req, void *out)
1538 {
1539     struct caam_hash_state *state = ahash_request_ctx(req);
1540     struct caam_export_state *export = out;
1541     u8 *buf = state->buf;
1542     int len = state->buflen;
1543 
1544     memcpy(export->buf, buf, len);
1545     memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1546     export->buflen = len;
1547     export->update = state->update;
1548     export->final = state->final;
1549     export->finup = state->finup;
1550 
1551     return 0;
1552 }
1553 
1554 static int ahash_import(struct ahash_request *req, const void *in)
1555 {
1556     struct caam_hash_state *state = ahash_request_ctx(req);
1557     const struct caam_export_state *export = in;
1558 
1559     memset(state, 0, sizeof(*state));
1560     memcpy(state->buf, export->buf, export->buflen);
1561     memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1562     state->buflen = export->buflen;
1563     state->update = export->update;
1564     state->final = export->final;
1565     state->finup = export->finup;
1566 
1567     return 0;
1568 }
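/*
 * Illustrative sketch (not part of the driver; req1/req2 are hypothetical
 * requests for the same algorithm): export/import let a partial hash
 * migrate between requests, e.g.
 *
 *	char state[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_init(req1);
 *	crypto_ahash_update(req1);
 *	crypto_ahash_export(req1, state);
 *	crypto_ahash_import(req2, state);
 *	crypto_ahash_final(req2);
 *
 * Only the software buffer, the CAAM running context and the state
 * function pointers are serialized; the memset() in ahash_import()
 * deliberately drops all DMA state, which is rebuilt on the next job.
 */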
1569 
1570 struct caam_hash_template {
1571     char name[CRYPTO_MAX_ALG_NAME];
1572     char driver_name[CRYPTO_MAX_ALG_NAME];
1573     char hmac_name[CRYPTO_MAX_ALG_NAME];
1574     char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1575     unsigned int blocksize;
1576     struct ahash_alg template_ahash;
1577     u32 alg_type;
1578 };
1579 
1580 /* ahash descriptors */
1581 static struct caam_hash_template driver_hash[] = {
1582     {
1583         .name = "sha1",
1584         .driver_name = "sha1-caam",
1585         .hmac_name = "hmac(sha1)",
1586         .hmac_driver_name = "hmac-sha1-caam",
1587         .blocksize = SHA1_BLOCK_SIZE,
1588         .template_ahash = {
1589             .init = ahash_init,
1590             .update = ahash_update,
1591             .final = ahash_final,
1592             .finup = ahash_finup,
1593             .digest = ahash_digest,
1594             .export = ahash_export,
1595             .import = ahash_import,
1596             .setkey = ahash_setkey,
1597             .halg = {
1598                 .digestsize = SHA1_DIGEST_SIZE,
1599                 .statesize = sizeof(struct caam_export_state),
1600             },
1601         },
1602         .alg_type = OP_ALG_ALGSEL_SHA1,
1603     }, {
1604         .name = "sha224",
1605         .driver_name = "sha224-caam",
1606         .hmac_name = "hmac(sha224)",
1607         .hmac_driver_name = "hmac-sha224-caam",
1608         .blocksize = SHA224_BLOCK_SIZE,
1609         .template_ahash = {
1610             .init = ahash_init,
1611             .update = ahash_update,
1612             .final = ahash_final,
1613             .finup = ahash_finup,
1614             .digest = ahash_digest,
1615             .export = ahash_export,
1616             .import = ahash_import,
1617             .setkey = ahash_setkey,
1618             .halg = {
1619                 .digestsize = SHA224_DIGEST_SIZE,
1620                 .statesize = sizeof(struct caam_export_state),
1621             },
1622         },
1623         .alg_type = OP_ALG_ALGSEL_SHA224,
1624     }, {
1625         .name = "sha256",
1626         .driver_name = "sha256-caam",
1627         .hmac_name = "hmac(sha256)",
1628         .hmac_driver_name = "hmac-sha256-caam",
1629         .blocksize = SHA256_BLOCK_SIZE,
1630         .template_ahash = {
1631             .init = ahash_init,
1632             .update = ahash_update,
1633             .final = ahash_final,
1634             .finup = ahash_finup,
1635             .digest = ahash_digest,
1636             .export = ahash_export,
1637             .import = ahash_import,
1638             .setkey = ahash_setkey,
1639             .halg = {
1640                 .digestsize = SHA256_DIGEST_SIZE,
1641                 .statesize = sizeof(struct caam_export_state),
1642             },
1643         },
1644         .alg_type = OP_ALG_ALGSEL_SHA256,
1645     }, {
1646         .name = "sha384",
1647         .driver_name = "sha384-caam",
1648         .hmac_name = "hmac(sha384)",
1649         .hmac_driver_name = "hmac-sha384-caam",
1650         .blocksize = SHA384_BLOCK_SIZE,
1651         .template_ahash = {
1652             .init = ahash_init,
1653             .update = ahash_update,
1654             .final = ahash_final,
1655             .finup = ahash_finup,
1656             .digest = ahash_digest,
1657             .export = ahash_export,
1658             .import = ahash_import,
1659             .setkey = ahash_setkey,
1660             .halg = {
1661                 .digestsize = SHA384_DIGEST_SIZE,
1662                 .statesize = sizeof(struct caam_export_state),
1663             },
1664         },
1665         .alg_type = OP_ALG_ALGSEL_SHA384,
1666     }, {
1667         .name = "sha512",
1668         .driver_name = "sha512-caam",
1669         .hmac_name = "hmac(sha512)",
1670         .hmac_driver_name = "hmac-sha512-caam",
1671         .blocksize = SHA512_BLOCK_SIZE,
1672         .template_ahash = {
1673             .init = ahash_init,
1674             .update = ahash_update,
1675             .final = ahash_final,
1676             .finup = ahash_finup,
1677             .digest = ahash_digest,
1678             .export = ahash_export,
1679             .import = ahash_import,
1680             .setkey = ahash_setkey,
1681             .halg = {
1682                 .digestsize = SHA512_DIGEST_SIZE,
1683                 .statesize = sizeof(struct caam_export_state),
1684             },
1685         },
1686         .alg_type = OP_ALG_ALGSEL_SHA512,
1687     }, {
1688         .name = "md5",
1689         .driver_name = "md5-caam",
1690         .hmac_name = "hmac(md5)",
1691         .hmac_driver_name = "hmac-md5-caam",
1692         .blocksize = MD5_BLOCK_WORDS * 4,
1693         .template_ahash = {
1694             .init = ahash_init,
1695             .update = ahash_update,
1696             .final = ahash_final,
1697             .finup = ahash_finup,
1698             .digest = ahash_digest,
1699             .export = ahash_export,
1700             .import = ahash_import,
1701             .setkey = ahash_setkey,
1702             .halg = {
1703                 .digestsize = MD5_DIGEST_SIZE,
1704                 .statesize = sizeof(struct caam_export_state),
1705             },
1706         },
1707         .alg_type = OP_ALG_ALGSEL_MD5,
1708     }, {
1709         .hmac_name = "xcbc(aes)",
1710         .hmac_driver_name = "xcbc-aes-caam",
1711         .blocksize = AES_BLOCK_SIZE,
1712         .template_ahash = {
1713             .init = ahash_init,
1714             .update = ahash_update,
1715             .final = ahash_final,
1716             .finup = ahash_finup,
1717             .digest = ahash_digest,
1718             .export = ahash_export,
1719             .import = ahash_import,
1720             .setkey = axcbc_setkey,
1721             .halg = {
1722                 .digestsize = AES_BLOCK_SIZE,
1723                 .statesize = sizeof(struct caam_export_state),
1724             },
1725          },
1726         .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1727     }, {
1728         .hmac_name = "cmac(aes)",
1729         .hmac_driver_name = "cmac-aes-caam",
1730         .blocksize = AES_BLOCK_SIZE,
1731         .template_ahash = {
1732             .init = ahash_init,
1733             .update = ahash_update,
1734             .final = ahash_final,
1735             .finup = ahash_finup,
1736             .digest = ahash_digest,
1737             .export = ahash_export,
1738             .import = ahash_import,
1739             .setkey = acmac_setkey,
1740             .halg = {
1741                 .digestsize = AES_BLOCK_SIZE,
1742                 .statesize = sizeof(struct caam_export_state),
1743             },
1744          },
1745         .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1746     },
1747 };
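/*
 * Note that the xcbc(aes) and cmac(aes) entries deliberately leave .name
 * and .driver_name empty: both MACs are inherently keyed, so only the
 * keyed variant is ever registered (see the OP_ALG_ALGSEL_AES check in
 * caam_algapi_hash_init() below).
 */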
1748 
1749 struct caam_hash_alg {
1750     struct list_head entry;
1751     int alg_type;
1752     struct ahash_alg ahash_alg;
1753 };
1754 
1755 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1756 {
1757     struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1758     struct crypto_alg *base = tfm->__crt_alg;
1759     struct hash_alg_common *halg =
1760          container_of(base, struct hash_alg_common, base);
1761     struct ahash_alg *alg =
1762          container_of(halg, struct ahash_alg, halg);
1763     struct caam_hash_alg *caam_hash =
1764          container_of(alg, struct caam_hash_alg, ahash_alg);
1765     struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1766     /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1767     static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1768                      HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1769                      HASH_MSG_LEN + 32,
1770                      HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1771                      HASH_MSG_LEN + 64,
1772                      HASH_MSG_LEN + SHA512_DIGEST_SIZE };
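    /*
     * The bare 32 and 64 above are intentional: MDHA runs SHA-224 and
     * SHA-384 with the full SHA-256 and SHA-512 internal state, so
     * their running digests are 32 and 64 bytes, not the truncated
     * output sizes.
     */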
1773     const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1774                               sh_desc_update);
1775     dma_addr_t dma_addr;
1776     struct caam_drv_private *priv;
1777 
1778     /*
1779      * Get a job ring from the Job Ring driver to ensure in-order
1780      * crypto request processing per tfm
1781      */
1782     ctx->jrdev = caam_jr_alloc();
1783     if (IS_ERR(ctx->jrdev)) {
1784         pr_err("Job Ring Device allocation for transform failed\n");
1785         return PTR_ERR(ctx->jrdev);
1786     }
1787 
1788     priv = dev_get_drvdata(ctx->jrdev->parent);
1789 
1790     if (is_xcbc_aes(caam_hash->alg_type)) {
1791         ctx->dir = DMA_TO_DEVICE;
1792         ctx->key_dir = DMA_BIDIRECTIONAL;
1793         ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1794         ctx->ctx_len = 48;
1795     } else if (is_cmac_aes(caam_hash->alg_type)) {
1796         ctx->dir = DMA_TO_DEVICE;
1797         ctx->key_dir = DMA_NONE;
1798         ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1799         ctx->ctx_len = 32;
1800     } else {
1801         if (priv->era >= 6) {
1802             ctx->dir = DMA_BIDIRECTIONAL;
1803             ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1804         } else {
1805             ctx->dir = DMA_TO_DEVICE;
1806             ctx->key_dir = DMA_NONE;
1807         }
1808         ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1809         ctx->ctx_len = runninglen[(ctx->adata.algtype &
1810                        OP_ALG_ALGSEL_SUBMASK) >>
1811                       OP_ALG_ALGSEL_SHIFT];
1812     }
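    /*
     * Key DMA directions above (rationale, for clarity): XCBC is mapped
     * DMA_BIDIRECTIONAL because the CAAM derives the K1/K2/K3 subkeys
     * and writes them back into ctx->key, while CMAC inlines its key
     * into the shared descriptor and needs no key mapping at all.
     */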
1813 
1814     if (ctx->key_dir != DMA_NONE) {
1815         ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1816                               ARRAY_SIZE(ctx->key),
1817                               ctx->key_dir,
1818                               DMA_ATTR_SKIP_CPU_SYNC);
1819         if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1820             dev_err(ctx->jrdev, "unable to map key\n");
1821             caam_jr_free(ctx->jrdev);
1822             return -ENOMEM;
1823         }
1824     }
1825 
1826     dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1827                     offsetof(struct caam_hash_ctx, key) -
1828                     sh_desc_update_offset,
1829                     ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1830     if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1831         dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1832 
1833         if (ctx->key_dir != DMA_NONE)
1834             dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1835                            ARRAY_SIZE(ctx->key),
1836                            ctx->key_dir,
1837                            DMA_ATTR_SKIP_CPU_SYNC);
1838 
1839         caam_jr_free(ctx->jrdev);
1840         return -ENOMEM;
1841     }
1842 
1843     ctx->sh_desc_update_dma = dma_addr;
1844     ctx->sh_desc_update_first_dma = dma_addr +
1845                     offsetof(struct caam_hash_ctx,
1846                          sh_desc_update_first) -
1847                     sh_desc_update_offset;
1848     ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1849                            sh_desc_fin) -
1850                     sh_desc_update_offset;
1851     ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1852                               sh_desc_digest) -
1853                     sh_desc_update_offset;
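    /*
     * All four shared descriptors live contiguously in struct
     * caam_hash_ctx, so the single dma_map_single_attrs() above covers
     * them all; each per-descriptor bus address is then derived by
     * offset arithmetic, e.g.
     *
     *	sh_desc_fin_dma = dma_addr
     *		+ offsetof(struct caam_hash_ctx, sh_desc_fin)
     *		- offsetof(struct caam_hash_ctx, sh_desc_update);
     *
     * which saves three DMA mappings per tfm.
     */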
1854 
1855     ctx->enginectx.op.do_one_request = ahash_do_one_req;
1856 
1857     crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1858                  sizeof(struct caam_hash_state));
1859 
1860     /*
1861      * For keyed hash algorithms, the shared descriptors
1862      * will be created later, in the setkey() callback
1863      */
1864     return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1865 }
1866 
1867 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1868 {
1869     struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1870 
1871     dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1872                    offsetof(struct caam_hash_ctx, key) -
1873                    offsetof(struct caam_hash_ctx, sh_desc_update),
1874                    ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1875     if (ctx->key_dir != DMA_NONE)
1876         dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1877                        ARRAY_SIZE(ctx->key), ctx->key_dir,
1878                        DMA_ATTR_SKIP_CPU_SYNC);
1879     caam_jr_free(ctx->jrdev);
1880 }
1881 
1882 void caam_algapi_hash_exit(void)
1883 {
1884     struct caam_hash_alg *t_alg, *n;
1885 
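    /*
     * hash_list is only initialized by caam_algapi_hash_init(); if that
     * never ran (e.g. because no MD block was found), ->next is still
     * NULL and there is nothing to unregister.
     */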
1886     if (!hash_list.next)
1887         return;
1888 
1889     list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1890         crypto_unregister_ahash(&t_alg->ahash_alg);
1891         list_del(&t_alg->entry);
1892         kfree(t_alg);
1893     }
1894 }
1895 
1896 static struct caam_hash_alg *
1897 caam_hash_alloc(struct caam_hash_template *template,
1898         bool keyed)
1899 {
1900     struct caam_hash_alg *t_alg;
1901     struct ahash_alg *halg;
1902     struct crypto_alg *alg;
1903 
1904     t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1905     if (!t_alg) {
1906         pr_err("failed to allocate t_alg\n");
1907         return ERR_PTR(-ENOMEM);
1908     }
1909 
1910     t_alg->ahash_alg = template->template_ahash;
1911     halg = &t_alg->ahash_alg;
1912     alg = &halg->halg.base;
1913 
1914     if (keyed) {
1915         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1916              template->hmac_name);
1917         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1918              template->hmac_driver_name);
1919     } else {
1920         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1921              template->name);
1922         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1923              template->driver_name);
1924         t_alg->ahash_alg.setkey = NULL;
1925     }
1926     alg->cra_module = THIS_MODULE;
1927     alg->cra_init = caam_hash_cra_init;
1928     alg->cra_exit = caam_hash_cra_exit;
1929     alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1930     alg->cra_priority = CAAM_CRA_PRIORITY;
1931     alg->cra_blocksize = template->blocksize;
1932     alg->cra_alignmask = 0;
1933     alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
1934 
1935     t_alg->alg_type = template->alg_type;
1936 
1937     return t_alg;
1938 }
1939 
1940 int caam_algapi_hash_init(struct device *ctrldev)
1941 {
1942     int i = 0, err = 0;
1943     struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1944     unsigned int md_limit = SHA512_DIGEST_SIZE;
1945     u32 md_inst, md_vid;
1946 
1947     /*
1948      * Register crypto algorithms the device supports.  First, identify
1949      * presence and attributes of the MD block.
1950      */
1951     if (priv->era < 10) {
1952         md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1953               CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1954         md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1955                CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1956     } else {
1957         u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1958 
1959         md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1960         md_inst = mdha & CHA_VER_NUM_MASK;
1961     }
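    /*
     * Era < 10 parts expose the MD block's version and instantiation
     * count in the legacy cha_id_ls/cha_num_ls registers; newer parts
     * provide a dedicated vreg.mdha register instead.
     */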
1962 
1963     /*
1964      * Skip registration of any hashing algorithms if the MD block
1965      * is not present.
1966      */
1967     if (!md_inst)
1968         return 0;
1969 
1970     /* A low-power MD block (LP256) supports digests only up to SHA-256 */
1971     if (md_vid == CHA_VER_VID_MD_LP256)
1972         md_limit = SHA256_DIGEST_SIZE;
1973 
1974     INIT_LIST_HEAD(&hash_list);
1975 
1976     /* register crypto algorithms the device supports */
1977     for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1978         struct caam_hash_alg *t_alg;
1979         struct caam_hash_template *alg = driver_hash + i;
1980 
1981         /* If MD size is not supported by device, skip registration */
1982         if (is_mdha(alg->alg_type) &&
1983             alg->template_ahash.halg.digestsize > md_limit)
1984             continue;
1985 
1986         /* register hmac version */
1987         t_alg = caam_hash_alloc(alg, true);
1988         if (IS_ERR(t_alg)) {
1989             err = PTR_ERR(t_alg);
1990             pr_warn("%s alg allocation failed\n",
1991                 alg->hmac_driver_name);
1992             continue;
1993         }
1994 
1995         err = crypto_register_ahash(&t_alg->ahash_alg);
1996         if (err) {
1997             pr_warn("%s alg registration failed: %d\n",
1998                 t_alg->ahash_alg.halg.base.cra_driver_name,
1999                 err);
2000             kfree(t_alg);
2001         } else
2002             list_add_tail(&t_alg->entry, &hash_list);
2003 
2004         if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2005             continue;
2006 
2007         /* register unkeyed version */
2008         t_alg = caam_hash_alloc(alg, false);
2009         if (IS_ERR(t_alg)) {
2010             err = PTR_ERR(t_alg);
2011             pr_warn("%s alg allocation failed\n", alg->driver_name);
2012             continue;
2013         }
2014 
2015         err = crypto_register_ahash(&t_alg->ahash_alg);
2016         if (err) {
2017             pr_warn("%s alg registration failed: %d\n",
2018                 t_alg->ahash_alg.halg.base.cra_driver_name,
2019                 err);
2020             kfree(t_alg);
2021         } else
2022             list_add_tail(&t_alg->entry, &hash_list);
2023     }
2024 
2025     return err;
2026 }
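/*
 * Illustrative usage (not part of the driver; key/keylen are assumed
 * caller-supplied): once registration succeeds, any kernel user can reach
 * these offloaded hashes through the generic ahash API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_ahash_setkey(tfm, key, keylen);
 *		...
 *		crypto_free_ahash(tfm);
 *	}
 *
 * with "hmac-sha256-caam" winning algorithm selection whenever its
 * CAAM_CRA_PRIORITY (3000) outranks the other registered providers.
 */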