0001 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
0002 /*
0003  * Copyright 2015-2016 Freescale Semiconductor Inc.
0004  * Copyright 2017-2019 NXP
0005  */
0006 
0007 #include "compat.h"
0008 #include "regs.h"
0009 #include "caamalg_qi2.h"
0010 #include "dpseci_cmd.h"
0011 #include "desc_constr.h"
0012 #include "error.h"
0013 #include "sg_sw_sec4.h"
0014 #include "sg_sw_qm2.h"
0015 #include "key_gen.h"
0016 #include "caamalg_desc.h"
0017 #include "caamhash_desc.h"
0018 #include "dpseci-debugfs.h"
0019 #include <linux/fsl/mc.h>
0020 #include <soc/fsl/dpaa2-io.h>
0021 #include <soc/fsl/dpaa2-fd.h>
0022 #include <crypto/xts.h>
0023 #include <asm/unaligned.h>
0024 
0025 #define CAAM_CRA_PRIORITY   2000
0026 
0027 /* max key is the sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
0028 #define CAAM_MAX_KEY_SIZE   (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
0029                  SHA512_DIGEST_SIZE * 2)
0030 
0031 /*
0032  * This is a cache of buffers from which users of the CAAM QI driver
0033  * can allocate short buffers. It is faster than kmalloc on the hotpath.
0034  * NOTE: A more elegant solution would be to have some headroom in the
0035  *       frames being processed; the dpaa2-eth driver could add it, but
0036  *       userspace application processing cannot know about such a
0037  *       limitation. So, for now, this cache will do.
0038  * NOTE: The memcache is SMP-safe; there is no need to take spinlocks here.
0039  */
0040 static struct kmem_cache *qi_cache;
0041 
0042 struct caam_alg_entry {
0043     struct device *dev;
0044     int class1_alg_type;
0045     int class2_alg_type;
0046     bool rfc3686;
0047     bool geniv;
0048     bool nodkp;
0049 };
0050 
0051 struct caam_aead_alg {
0052     struct aead_alg aead;
0053     struct caam_alg_entry caam;
0054     bool registered;
0055 };
0056 
0057 struct caam_skcipher_alg {
0058     struct skcipher_alg skcipher;
0059     struct caam_alg_entry caam;
0060     bool registered;
0061 };
0062 
0063 /**
0064  * struct caam_ctx - per-session context
0065  * @flc: Flow Contexts array
0066  * @key:  [authentication key], encryption key
0067  * @flc_dma: I/O virtual addresses of the Flow Contexts
0068  * @key_dma: I/O virtual address of the key
0069  * @dir: DMA direction for mapping key and Flow Contexts
0070  * @dev: dpseci device
0071  * @adata: authentication algorithm details
0072  * @cdata: encryption algorithm details
0073  * @authsize: authentication tag (a.k.a. ICV / MAC) size
0074  * @xts_key_fallback: true if fallback tfm needs to be used due
0075  *            to unsupported xts key lengths
0076  * @fallback: xts fallback tfm
0077  */
0078 struct caam_ctx {
0079     struct caam_flc flc[NUM_OP];
0080     u8 key[CAAM_MAX_KEY_SIZE];
0081     dma_addr_t flc_dma[NUM_OP];
0082     dma_addr_t key_dma;
0083     enum dma_data_direction dir;
0084     struct device *dev;
0085     struct alginfo adata;
0086     struct alginfo cdata;
0087     unsigned int authsize;
0088     bool xts_key_fallback;
0089     struct crypto_skcipher *fallback;
0090 };
0091 
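/*
 * dpaa2_caam_iova_to_virt - translate an I/O virtual address handed back by
 * the hardware into a CPU virtual address. When the device sits behind an
 * IOMMU, the IOVA is first resolved to a physical address through the IOMMU
 * domain; otherwise the IOVA already is the physical address.
 */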
0092 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
0093                      dma_addr_t iova_addr)
0094 {
0095     phys_addr_t phys_addr;
0096 
0097     phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
0098                    iova_addr;
0099 
0100     return phys_to_virt(phys_addr);
0101 }
0102 
0103 /*
0104  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
0105  *
0106  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
0107  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
0108  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
0109  * hosting 16 SG entries.
0110  *
0111  * @flags - flags that would be used for the equivalent kmalloc(..) call
0112  *
0113  * Returns a pointer to a retrieved buffer on success or NULL on failure.
0114  */
0115 static inline void *qi_cache_zalloc(gfp_t flags)
0116 {
0117     return kmem_cache_zalloc(qi_cache, flags);
0118 }
0119 
0120 /*
0121  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
0122  *
0123  * @obj - buffer previously allocated by qi_cache_zalloc
0124  *
0125  * No checking is done; the call is passed straight through to
0126  * kmem_cache_free(...)
0127  */
0128 static inline void qi_cache_free(void *obj)
0129 {
0130     kmem_cache_free(qi_cache, obj);
0131 }
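
/*
 * Illustrative sketch (not part of the driver): how qi_cache is typically
 * created and consumed. The real cache is created once at probe time (not
 * shown in this excerpt) with CAAM_QI_MEMCACHE_SIZE-sized objects; the
 * names example_qi_cache_create() and example_build_edesc() are hypothetical.
 */
static int example_qi_cache_create(void)
{
	/* a single slab cache backs every short hotpath buffer;
	 * qi_cache_zalloc() zeroes objects on allocation */
	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, 0, NULL);
	return qi_cache ? 0 : -ENOMEM;
}

static void *example_build_edesc(gfp_t req_flags)
{
	/* GFP_DMA plus the caller's flags mirrors the edesc allocations below */
	void *edesc = qi_cache_zalloc(GFP_DMA | req_flags);

	if (!edesc)
		return NULL;
	/* ... fill in the extended descriptor ... */
	return edesc;	/* released later with qi_cache_free(edesc) */
}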
0132 
0133 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
0134 {
0135     switch (crypto_tfm_alg_type(areq->tfm)) {
0136     case CRYPTO_ALG_TYPE_SKCIPHER:
0137         return skcipher_request_ctx(skcipher_request_cast(areq));
0138     case CRYPTO_ALG_TYPE_AEAD:
0139         return aead_request_ctx(container_of(areq, struct aead_request,
0140                              base));
0141     case CRYPTO_ALG_TYPE_AHASH:
0142         return ahash_request_ctx(ahash_request_cast(areq));
0143     default:
0144         return ERR_PTR(-EINVAL);
0145     }
0146 }
0147 
0148 static void caam_unmap(struct device *dev, struct scatterlist *src,
0149                struct scatterlist *dst, int src_nents,
0150                int dst_nents, dma_addr_t iv_dma, int ivsize,
0151                enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
0152                int qm_sg_bytes)
0153 {
0154     if (dst != src) {
0155         if (src_nents)
0156             dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
0157         if (dst_nents)
0158             dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
0159     } else {
0160         dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
0161     }
0162 
0163     if (iv_dma)
0164         dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
0165 
0166     if (qm_sg_bytes)
0167         dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
0168 }
0169 
0170 static int aead_set_sh_desc(struct crypto_aead *aead)
0171 {
0172     struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
0173                          typeof(*alg), aead);
0174     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0175     unsigned int ivsize = crypto_aead_ivsize(aead);
0176     struct device *dev = ctx->dev;
0177     struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
0178     struct caam_flc *flc;
0179     u32 *desc;
0180     u32 ctx1_iv_off = 0;
0181     u32 *nonce = NULL;
0182     unsigned int data_len[2];
0183     u32 inl_mask;
0184     const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
0185                    OP_ALG_AAI_CTR_MOD128);
0186     const bool is_rfc3686 = alg->caam.rfc3686;
0187 
0188     if (!ctx->cdata.keylen || !ctx->authsize)
0189         return 0;
0190 
0191     /*
0192      * AES-CTR needs to load IV in CONTEXT1 reg
0193      * at an offset of 128 bits (16 bytes)
0194      * CONTEXT1[255:128] = IV
0195      */
0196     if (ctr_mode)
0197         ctx1_iv_off = 16;
0198 
0199     /*
0200      * RFC3686 specific:
0201      *  CONTEXT1[255:128] = {NONCE, IV, COUNTER}
0202      */
0203     if (is_rfc3686) {
0204         ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
0205         nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
0206                 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
0207     }
0208 
0209     /*
0210      * In case |user key| > |derived key|, using DKP<imm,imm> would result
0211      * in invalid opcodes (last bytes of user key) in the resulting
0212      * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
0213      * addresses are needed.
0214      */
0215     ctx->adata.key_virt = ctx->key;
0216     ctx->adata.key_dma = ctx->key_dma;
0217 
0218     ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
0219     ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
0220 
0221     data_len[0] = ctx->adata.keylen_pad;
0222     data_len[1] = ctx->cdata.keylen;
0223 
0224     /* aead_encrypt shared descriptor */
0225     if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
0226                          DESC_QI_AEAD_ENC_LEN) +
0227                   (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
0228                   DESC_JOB_IO_LEN, data_len, &inl_mask,
0229                   ARRAY_SIZE(data_len)) < 0)
0230         return -EINVAL;
0231 
0232     ctx->adata.key_inline = !!(inl_mask & 1);
0233     ctx->cdata.key_inline = !!(inl_mask & 2);
0234 
0235     flc = &ctx->flc[ENCRYPT];
0236     desc = flc->sh_desc;
0237 
0238     if (alg->caam.geniv)
0239         cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
0240                       ivsize, ctx->authsize, is_rfc3686,
0241                       nonce, ctx1_iv_off, true,
0242                       priv->sec_attr.era);
0243     else
0244         cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
0245                        ivsize, ctx->authsize, is_rfc3686, nonce,
0246                        ctx1_iv_off, true, priv->sec_attr.era);
0247 
0248     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0249     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0250                    sizeof(flc->flc) + desc_bytes(desc),
0251                    ctx->dir);
0252 
0253     /* aead_decrypt shared descriptor */
0254     if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
0255                   (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
0256                   DESC_JOB_IO_LEN, data_len, &inl_mask,
0257                   ARRAY_SIZE(data_len)) < 0)
0258         return -EINVAL;
0259 
0260     ctx->adata.key_inline = !!(inl_mask & 1);
0261     ctx->cdata.key_inline = !!(inl_mask & 2);
0262 
0263     flc = &ctx->flc[DECRYPT];
0264     desc = flc->sh_desc;
0265     cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
0266                    ivsize, ctx->authsize, alg->caam.geniv,
0267                    is_rfc3686, nonce, ctx1_iv_off, true,
0268                    priv->sec_attr.era);
0269     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0270     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0271                    sizeof(flc->flc) + desc_bytes(desc),
0272                    ctx->dir);
0273 
0274     return 0;
0275 }
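
/*
 * Minimal sketch (not part of the driver): every *_set_sh_desc() and
 * *_setkey() routine in this file finishes a shared descriptor with the
 * same two steps - store the shared descriptor length (SDL) in word 1 of
 * the flow context and sync the updated bytes out to the device. A
 * hypothetical helper capturing that pattern could look like this.
 */
static void example_flc_finalize(struct device *dev, struct caam_flc *flc,
				 dma_addr_t flc_dma, u32 *desc,
				 enum dma_data_direction dir)
{
	flc->flc[1] = cpu_to_caam32(desc_len(desc));	/* SDL */
	dma_sync_single_for_device(dev, flc_dma,
				   sizeof(flc->flc) + desc_bytes(desc), dir);
}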
0276 
0277 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
0278 {
0279     struct caam_ctx *ctx = crypto_aead_ctx(authenc);
0280 
0281     ctx->authsize = authsize;
0282     aead_set_sh_desc(authenc);
0283 
0284     return 0;
0285 }
0286 
0287 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
0288                unsigned int keylen)
0289 {
0290     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0291     struct device *dev = ctx->dev;
0292     struct crypto_authenc_keys keys;
0293 
0294     if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
0295         goto badkey;
0296 
0297     dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
0298         keys.authkeylen + keys.enckeylen, keys.enckeylen,
0299         keys.authkeylen);
0300     print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
0301                  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
0302 
0303     ctx->adata.keylen = keys.authkeylen;
0304     ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
0305                           OP_ALG_ALGSEL_MASK);
0306 
0307     if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
0308         goto badkey;
0309 
0310     memcpy(ctx->key, keys.authkey, keys.authkeylen);
0311     memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
0312     dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
0313                    keys.enckeylen, ctx->dir);
0314     print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
0315                  DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
0316                  ctx->adata.keylen_pad + keys.enckeylen, 1);
0317 
0318     ctx->cdata.keylen = keys.enckeylen;
0319 
0320     memzero_explicit(&keys, sizeof(keys));
0321     return aead_set_sh_desc(aead);
0322 badkey:
0323     memzero_explicit(&keys, sizeof(keys));
0324     return -EINVAL;
0325 }
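
/*
 * Illustrative sketch (not part of the driver): the layout of ctx->key as
 * filled in by aead_setkey() above. The offsets follow directly from the
 * two memcpy() calls; example_key_pointers() is a hypothetical name.
 *
 *   ctx->key:  [ split authentication key, padded to adata.keylen_pad ]
 *              [ encryption key, cdata.keylen bytes                   ]
 */
static void example_key_pointers(struct caam_ctx *ctx)
{
	const u8 *auth_key = ctx->key;				/* offset 0 */
	const u8 *enc_key = ctx->key + ctx->adata.keylen_pad;	/* after pad */
	dma_addr_t enc_key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	/* these are exactly the values aead_set_sh_desc() plugs into
	 * adata/cdata for the DKP<ptr,imm> construction */
	(void)auth_key;
	(void)enc_key;
	(void)enc_key_dma;
}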
0326 
0327 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
0328                 unsigned int keylen)
0329 {
0330     struct crypto_authenc_keys keys;
0331     int err;
0332 
0333     err = crypto_authenc_extractkeys(&keys, key, keylen);
0334     if (unlikely(err))
0335         goto out;
0336 
0337     err = -EINVAL;
0338     if (keys.enckeylen != DES3_EDE_KEY_SIZE)
0339         goto out;
0340 
0341     err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
0342           aead_setkey(aead, key, keylen);
0343 
0344 out:
0345     memzero_explicit(&keys, sizeof(keys));
0346     return err;
0347 }
0348 
0349 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
0350                        bool encrypt)
0351 {
0352     struct crypto_aead *aead = crypto_aead_reqtfm(req);
0353     struct caam_request *req_ctx = aead_request_ctx(req);
0354     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
0355     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
0356     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0357     struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
0358                          typeof(*alg), aead);
0359     struct device *dev = ctx->dev;
0360     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
0361               GFP_KERNEL : GFP_ATOMIC;
0362     int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
0363     int src_len, dst_len = 0;
0364     struct aead_edesc *edesc;
0365     dma_addr_t qm_sg_dma, iv_dma = 0;
0366     int ivsize = 0;
0367     unsigned int authsize = ctx->authsize;
0368     int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
0369     int in_len, out_len;
0370     struct dpaa2_sg_entry *sg_table;
0371 
0372     /* allocate space for base edesc, link tables and IV */
0373     edesc = qi_cache_zalloc(GFP_DMA | flags);
0374     if (unlikely(!edesc)) {
0375         dev_err(dev, "could not allocate extended descriptor\n");
0376         return ERR_PTR(-ENOMEM);
0377     }
0378 
0379     if (unlikely(req->dst != req->src)) {
0380         src_len = req->assoclen + req->cryptlen;
0381         dst_len = src_len + (encrypt ? authsize : (-authsize));
0382 
0383         src_nents = sg_nents_for_len(req->src, src_len);
0384         if (unlikely(src_nents < 0)) {
0385             dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
0386                 src_len);
0387             qi_cache_free(edesc);
0388             return ERR_PTR(src_nents);
0389         }
0390 
0391         dst_nents = sg_nents_for_len(req->dst, dst_len);
0392         if (unlikely(dst_nents < 0)) {
0393             dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
0394                 dst_len);
0395             qi_cache_free(edesc);
0396             return ERR_PTR(dst_nents);
0397         }
0398 
0399         if (src_nents) {
0400             mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
0401                               DMA_TO_DEVICE);
0402             if (unlikely(!mapped_src_nents)) {
0403                 dev_err(dev, "unable to map source\n");
0404                 qi_cache_free(edesc);
0405                 return ERR_PTR(-ENOMEM);
0406             }
0407         } else {
0408             mapped_src_nents = 0;
0409         }
0410 
0411         if (dst_nents) {
0412             mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
0413                               DMA_FROM_DEVICE);
0414             if (unlikely(!mapped_dst_nents)) {
0415                 dev_err(dev, "unable to map destination\n");
0416                 dma_unmap_sg(dev, req->src, src_nents,
0417                          DMA_TO_DEVICE);
0418                 qi_cache_free(edesc);
0419                 return ERR_PTR(-ENOMEM);
0420             }
0421         } else {
0422             mapped_dst_nents = 0;
0423         }
0424     } else {
0425         src_len = req->assoclen + req->cryptlen +
0426               (encrypt ? authsize : 0);
0427 
0428         src_nents = sg_nents_for_len(req->src, src_len);
0429         if (unlikely(src_nents < 0)) {
0430             dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
0431                 src_len);
0432             qi_cache_free(edesc);
0433             return ERR_PTR(src_nents);
0434         }
0435 
0436         mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
0437                           DMA_BIDIRECTIONAL);
0438         if (unlikely(!mapped_src_nents)) {
0439             dev_err(dev, "unable to map source\n");
0440             qi_cache_free(edesc);
0441             return ERR_PTR(-ENOMEM);
0442         }
0443     }
0444 
0445     if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
0446         ivsize = crypto_aead_ivsize(aead);
0447 
0448     /*
0449      * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
0450      * Input is not contiguous.
0451      * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
0452      * the end of the table by allocating more S/G entries. Logic:
0453      * if (src != dst && output S/G)
0454      *      pad output S/G, if needed
0455      * else if (src == dst && S/G)
0456      *      overlapping S/Gs; pad one of them
0457      * else if (input S/G) ...
0458      *      pad input S/G, if needed
0459      */
0460     qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
0461     if (mapped_dst_nents > 1)
0462         qm_sg_nents += pad_sg_nents(mapped_dst_nents);
0463     else if ((req->src == req->dst) && (mapped_src_nents > 1))
0464         qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
0465                   1 + !!ivsize +
0466                   pad_sg_nents(mapped_src_nents));
0467     else
0468         qm_sg_nents = pad_sg_nents(qm_sg_nents);
0469 
0470     sg_table = &edesc->sgt[0];
0471     qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
0472     if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
0473              CAAM_QI_MEMCACHE_SIZE)) {
0474         dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
0475             qm_sg_nents, ivsize);
0476         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
0477                0, DMA_NONE, 0, 0);
0478         qi_cache_free(edesc);
0479         return ERR_PTR(-ENOMEM);
0480     }
0481 
0482     if (ivsize) {
0483         u8 *iv = (u8 *)(sg_table + qm_sg_nents);
0484 
0485         /* Make sure IV is located in a DMAable area */
0486         memcpy(iv, req->iv, ivsize);
0487 
0488         iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
0489         if (dma_mapping_error(dev, iv_dma)) {
0490             dev_err(dev, "unable to map IV\n");
0491             caam_unmap(dev, req->src, req->dst, src_nents,
0492                    dst_nents, 0, 0, DMA_NONE, 0, 0);
0493             qi_cache_free(edesc);
0494             return ERR_PTR(-ENOMEM);
0495         }
0496     }
0497 
0498     edesc->src_nents = src_nents;
0499     edesc->dst_nents = dst_nents;
0500     edesc->iv_dma = iv_dma;
0501 
0502     if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
0503         OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
0504         /*
0505          * The associated data already includes the IV, but we need
0506          * to skip it when we authenticate or encrypt...
0507          */
0508         edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
0509     else
0510         edesc->assoclen = cpu_to_caam32(req->assoclen);
0511     edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
0512                          DMA_TO_DEVICE);
0513     if (dma_mapping_error(dev, edesc->assoclen_dma)) {
0514         dev_err(dev, "unable to map assoclen\n");
0515         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
0516                iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
0517         qi_cache_free(edesc);
0518         return ERR_PTR(-ENOMEM);
0519     }
0520 
0521     dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
0522     qm_sg_index++;
0523     if (ivsize) {
0524         dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
0525         qm_sg_index++;
0526     }
0527     sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
0528     qm_sg_index += mapped_src_nents;
0529 
0530     if (mapped_dst_nents > 1)
0531         sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
0532 
0533     qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
0534     if (dma_mapping_error(dev, qm_sg_dma)) {
0535         dev_err(dev, "unable to map S/G table\n");
0536         dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
0537         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
0538                iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
0539         qi_cache_free(edesc);
0540         return ERR_PTR(-ENOMEM);
0541     }
0542 
0543     edesc->qm_sg_dma = qm_sg_dma;
0544     edesc->qm_sg_bytes = qm_sg_bytes;
0545 
0546     out_len = req->assoclen + req->cryptlen +
0547           (encrypt ? ctx->authsize : (-ctx->authsize));
0548     in_len = 4 + ivsize + req->assoclen + req->cryptlen;
0549 
0550     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
0551     dpaa2_fl_set_final(in_fle, true);
0552     dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
0553     dpaa2_fl_set_addr(in_fle, qm_sg_dma);
0554     dpaa2_fl_set_len(in_fle, in_len);
0555 
0556     if (req->dst == req->src) {
0557         if (mapped_src_nents == 1) {
0558             dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
0559             dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
0560         } else {
0561             dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
0562             dpaa2_fl_set_addr(out_fle, qm_sg_dma +
0563                       (1 + !!ivsize) * sizeof(*sg_table));
0564         }
0565     } else if (!mapped_dst_nents) {
0566         /*
0567          * The crypto engine requires the output entry to be present
0568          * when a "frame list" FD is used.
0569          * Since the engine does not support FMT=2'b11 (unused entry
0570          * type), leaving out_fle zeroized is the best option.
0571          */
0572         goto skip_out_fle;
0573     } else if (mapped_dst_nents == 1) {
0574         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
0575         dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
0576     } else {
0577         dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
0578         dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
0579                   sizeof(*sg_table));
0580     }
0581 
0582     dpaa2_fl_set_len(out_fle, out_len);
0583 
0584 skip_out_fle:
0585     return edesc;
0586 }
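
/*
 * Worked example (not part of the driver) for the S/G sizing logic in
 * aead_edesc_alloc() above. It assumes pad_sg_nents() rounds up to a
 * multiple of 4, matching the "HW reads 4 S/G entries at a time" rule;
 * the concrete counts are only an example.
 */
static int example_aead_qm_sg_nents(void)
{
	int mapped_src_nents = 3;	/* source spread over 3 S/G entries */
	int mapped_dst_nents = 2;	/* dst != src, 2 entries */
	int ivsize = 16;		/* IV present */
	int nents;

	/* assoclen entry + optional IV entry + source entries */
	nents = 1 + !!ivsize + mapped_src_nents;		/* 5 */
	/* a separate output S/G gets its own padding at the end */
	if (mapped_dst_nents > 1)
		nents += pad_sg_nents(mapped_dst_nents);	/* + 4 = 9 */
	else
		nents = pad_sg_nents(nents);

	return nents;	/* 9 struct dpaa2_sg_entry slots in this example */
}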
0587 
0588 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
0589 {
0590     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0591     unsigned int ivsize = crypto_aead_ivsize(aead);
0592     struct device *dev = ctx->dev;
0593     struct caam_flc *flc;
0594     u32 *desc;
0595 
0596     if (!ctx->cdata.keylen || !ctx->authsize)
0597         return 0;
0598 
0599     flc = &ctx->flc[ENCRYPT];
0600     desc = flc->sh_desc;
0601     cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
0602                    ctx->authsize, true, true);
0603     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0604     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0605                    sizeof(flc->flc) + desc_bytes(desc),
0606                    ctx->dir);
0607 
0608     flc = &ctx->flc[DECRYPT];
0609     desc = flc->sh_desc;
0610     cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
0611                    ctx->authsize, false, true);
0612     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0613     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0614                    sizeof(flc->flc) + desc_bytes(desc),
0615                    ctx->dir);
0616 
0617     return 0;
0618 }
0619 
0620 static int chachapoly_setauthsize(struct crypto_aead *aead,
0621                   unsigned int authsize)
0622 {
0623     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0624 
0625     if (authsize != POLY1305_DIGEST_SIZE)
0626         return -EINVAL;
0627 
0628     ctx->authsize = authsize;
0629     return chachapoly_set_sh_desc(aead);
0630 }
0631 
0632 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
0633                  unsigned int keylen)
0634 {
0635     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0636     unsigned int ivsize = crypto_aead_ivsize(aead);
0637     unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
0638 
0639     if (keylen != CHACHA_KEY_SIZE + saltlen)
0640         return -EINVAL;
0641 
0642     ctx->cdata.key_virt = key;
0643     ctx->cdata.keylen = keylen - saltlen;
0644 
0645     return chachapoly_set_sh_desc(aead);
0646 }
0647 
0648 static int gcm_set_sh_desc(struct crypto_aead *aead)
0649 {
0650     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0651     struct device *dev = ctx->dev;
0652     unsigned int ivsize = crypto_aead_ivsize(aead);
0653     struct caam_flc *flc;
0654     u32 *desc;
0655     int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
0656             ctx->cdata.keylen;
0657 
0658     if (!ctx->cdata.keylen || !ctx->authsize)
0659         return 0;
0660 
0661     /*
0662      * AES GCM encrypt shared descriptor
0663      * Job Descriptor and Shared Descriptor
0664      * must fit into the 64-word Descriptor h/w Buffer
0665      */
0666     if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
0667         ctx->cdata.key_inline = true;
0668         ctx->cdata.key_virt = ctx->key;
0669     } else {
0670         ctx->cdata.key_inline = false;
0671         ctx->cdata.key_dma = ctx->key_dma;
0672     }
0673 
0674     flc = &ctx->flc[ENCRYPT];
0675     desc = flc->sh_desc;
0676     cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
0677     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0678     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0679                    sizeof(flc->flc) + desc_bytes(desc),
0680                    ctx->dir);
0681 
0682     /*
0683      * Job Descriptor and Shared Descriptors
0684      * must all fit into the 64-word Descriptor h/w Buffer
0685      */
0686     if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
0687         ctx->cdata.key_inline = true;
0688         ctx->cdata.key_virt = ctx->key;
0689     } else {
0690         ctx->cdata.key_inline = false;
0691         ctx->cdata.key_dma = ctx->key_dma;
0692     }
0693 
0694     flc = &ctx->flc[DECRYPT];
0695     desc = flc->sh_desc;
0696     cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
0697     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0698     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0699                    sizeof(flc->flc) + desc_bytes(desc),
0700                    ctx->dir);
0701 
0702     return 0;
0703 }
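
/*
 * Minimal sketch (not part of the driver): the inline-vs-referenced key
 * decision used by gcm_set_sh_desc() above (and by the rfc4106/rfc4543
 * variants below). If the key still fits in the 64-word descriptor buffer
 * alongside the commands it is embedded directly; otherwise the descriptor
 * carries a pointer to the DMA-mapped key. The helper name is hypothetical.
 */
static void example_gcm_key_placement(struct caam_ctx *ctx, int desc_len_needed)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (rem_bytes >= desc_len_needed) {
		ctx->cdata.key_inline = true;		/* key embedded */
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;		/* key referenced */
		ctx->cdata.key_dma = ctx->key_dma;
	}
}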
0704 
0705 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
0706 {
0707     struct caam_ctx *ctx = crypto_aead_ctx(authenc);
0708     int err;
0709 
0710     err = crypto_gcm_check_authsize(authsize);
0711     if (err)
0712         return err;
0713 
0714     ctx->authsize = authsize;
0715     gcm_set_sh_desc(authenc);
0716 
0717     return 0;
0718 }
0719 
0720 static int gcm_setkey(struct crypto_aead *aead,
0721               const u8 *key, unsigned int keylen)
0722 {
0723     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0724     struct device *dev = ctx->dev;
0725     int ret;
0726 
0727     ret = aes_check_keylen(keylen);
0728     if (ret)
0729         return ret;
0730     print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
0731                  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
0732 
0733     memcpy(ctx->key, key, keylen);
0734     dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
0735     ctx->cdata.keylen = keylen;
0736 
0737     return gcm_set_sh_desc(aead);
0738 }
0739 
0740 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
0741 {
0742     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0743     struct device *dev = ctx->dev;
0744     unsigned int ivsize = crypto_aead_ivsize(aead);
0745     struct caam_flc *flc;
0746     u32 *desc;
0747     int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
0748             ctx->cdata.keylen;
0749 
0750     if (!ctx->cdata.keylen || !ctx->authsize)
0751         return 0;
0752 
0753     ctx->cdata.key_virt = ctx->key;
0754 
0755     /*
0756      * RFC4106 encrypt shared descriptor
0757      * Job Descriptor and Shared Descriptor
0758      * must fit into the 64-word Descriptor h/w Buffer
0759      */
0760     if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
0761         ctx->cdata.key_inline = true;
0762     } else {
0763         ctx->cdata.key_inline = false;
0764         ctx->cdata.key_dma = ctx->key_dma;
0765     }
0766 
0767     flc = &ctx->flc[ENCRYPT];
0768     desc = flc->sh_desc;
0769     cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
0770                   true);
0771     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0772     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0773                    sizeof(flc->flc) + desc_bytes(desc),
0774                    ctx->dir);
0775 
0776     /*
0777      * Job Descriptor and Shared Descriptors
0778      * must all fit into the 64-word Descriptor h/w Buffer
0779      */
0780     if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
0781         ctx->cdata.key_inline = true;
0782     } else {
0783         ctx->cdata.key_inline = false;
0784         ctx->cdata.key_dma = ctx->key_dma;
0785     }
0786 
0787     flc = &ctx->flc[DECRYPT];
0788     desc = flc->sh_desc;
0789     cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
0790                   true);
0791     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0792     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0793                    sizeof(flc->flc) + desc_bytes(desc),
0794                    ctx->dir);
0795 
0796     return 0;
0797 }
0798 
0799 static int rfc4106_setauthsize(struct crypto_aead *authenc,
0800                    unsigned int authsize)
0801 {
0802     struct caam_ctx *ctx = crypto_aead_ctx(authenc);
0803     int err;
0804 
0805     err = crypto_rfc4106_check_authsize(authsize);
0806     if (err)
0807         return err;
0808 
0809     ctx->authsize = authsize;
0810     rfc4106_set_sh_desc(authenc);
0811 
0812     return 0;
0813 }
0814 
0815 static int rfc4106_setkey(struct crypto_aead *aead,
0816               const u8 *key, unsigned int keylen)
0817 {
0818     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0819     struct device *dev = ctx->dev;
0820     int ret;
0821 
0822     ret = aes_check_keylen(keylen - 4);
0823     if (ret)
0824         return ret;
0825 
0826     print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
0827                  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
0828 
0829     memcpy(ctx->key, key, keylen);
0830     /*
0831      * The last four bytes of the key material are used as the salt value
0832      * in the nonce. Update the AES key length.
0833      */
0834     ctx->cdata.keylen = keylen - 4;
0835     dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
0836                    ctx->dir);
0837 
0838     return rfc4106_set_sh_desc(aead);
0839 }
0840 
0841 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
0842 {
0843     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0844     struct device *dev = ctx->dev;
0845     unsigned int ivsize = crypto_aead_ivsize(aead);
0846     struct caam_flc *flc;
0847     u32 *desc;
0848     int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
0849             ctx->cdata.keylen;
0850 
0851     if (!ctx->cdata.keylen || !ctx->authsize)
0852         return 0;
0853 
0854     ctx->cdata.key_virt = ctx->key;
0855 
0856     /*
0857      * RFC4543 encrypt shared descriptor
0858      * Job Descriptor and Shared Descriptor
0859      * must fit into the 64-word Descriptor h/w Buffer
0860      */
0861     if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
0862         ctx->cdata.key_inline = true;
0863     } else {
0864         ctx->cdata.key_inline = false;
0865         ctx->cdata.key_dma = ctx->key_dma;
0866     }
0867 
0868     flc = &ctx->flc[ENCRYPT];
0869     desc = flc->sh_desc;
0870     cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
0871                   true);
0872     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0873     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0874                    sizeof(flc->flc) + desc_bytes(desc),
0875                    ctx->dir);
0876 
0877     /*
0878      * Job Descriptor and Shared Descriptors
0879      * must all fit into the 64-word Descriptor h/w Buffer
0880      */
0881     if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
0882         ctx->cdata.key_inline = true;
0883     } else {
0884         ctx->cdata.key_inline = false;
0885         ctx->cdata.key_dma = ctx->key_dma;
0886     }
0887 
0888     flc = &ctx->flc[DECRYPT];
0889     desc = flc->sh_desc;
0890     cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
0891                   true);
0892     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0893     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0894                    sizeof(flc->flc) + desc_bytes(desc),
0895                    ctx->dir);
0896 
0897     return 0;
0898 }
0899 
0900 static int rfc4543_setauthsize(struct crypto_aead *authenc,
0901                    unsigned int authsize)
0902 {
0903     struct caam_ctx *ctx = crypto_aead_ctx(authenc);
0904 
0905     if (authsize != 16)
0906         return -EINVAL;
0907 
0908     ctx->authsize = authsize;
0909     rfc4543_set_sh_desc(authenc);
0910 
0911     return 0;
0912 }
0913 
0914 static int rfc4543_setkey(struct crypto_aead *aead,
0915               const u8 *key, unsigned int keylen)
0916 {
0917     struct caam_ctx *ctx = crypto_aead_ctx(aead);
0918     struct device *dev = ctx->dev;
0919     int ret;
0920 
0921     ret = aes_check_keylen(keylen - 4);
0922     if (ret)
0923         return ret;
0924 
0925     print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
0926                  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
0927 
0928     memcpy(ctx->key, key, keylen);
0929     /*
0930      * The last four bytes of the key material are used as the salt value
0931      * in the nonce. Update the AES key length.
0932      */
0933     ctx->cdata.keylen = keylen - 4;
0934     dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
0935                    ctx->dir);
0936 
0937     return rfc4543_set_sh_desc(aead);
0938 }
0939 
0940 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
0941                unsigned int keylen, const u32 ctx1_iv_off)
0942 {
0943     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
0944     struct caam_skcipher_alg *alg =
0945         container_of(crypto_skcipher_alg(skcipher),
0946                  struct caam_skcipher_alg, skcipher);
0947     struct device *dev = ctx->dev;
0948     struct caam_flc *flc;
0949     unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
0950     u32 *desc;
0951     const bool is_rfc3686 = alg->caam.rfc3686;
0952 
0953     print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
0954                  DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
0955 
0956     ctx->cdata.keylen = keylen;
0957     ctx->cdata.key_virt = key;
0958     ctx->cdata.key_inline = true;
0959 
0960     /* skcipher_encrypt shared descriptor */
0961     flc = &ctx->flc[ENCRYPT];
0962     desc = flc->sh_desc;
0963     cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
0964                    ctx1_iv_off);
0965     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0966     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
0967                    sizeof(flc->flc) + desc_bytes(desc),
0968                    ctx->dir);
0969 
0970     /* skcipher_decrypt shared descriptor */
0971     flc = &ctx->flc[DECRYPT];
0972     desc = flc->sh_desc;
0973     cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
0974                    ctx1_iv_off);
0975     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
0976     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
0977                    sizeof(flc->flc) + desc_bytes(desc),
0978                    ctx->dir);
0979 
0980     return 0;
0981 }
0982 
0983 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
0984                    const u8 *key, unsigned int keylen)
0985 {
0986     int err;
0987 
0988     err = aes_check_keylen(keylen);
0989     if (err)
0990         return err;
0991 
0992     return skcipher_setkey(skcipher, key, keylen, 0);
0993 }
0994 
0995 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
0996                    const u8 *key, unsigned int keylen)
0997 {
0998     u32 ctx1_iv_off;
0999     int err;
1000 
1001     /*
1002      * RFC3686 specific:
1003      *  | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1004      *  | *key = {KEY, NONCE}
1005      */
1006     ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1007     keylen -= CTR_RFC3686_NONCE_SIZE;
1008 
1009     err = aes_check_keylen(keylen);
1010     if (err)
1011         return err;
1012 
1013     return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1014 }
1015 
1016 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1017                    const u8 *key, unsigned int keylen)
1018 {
1019     u32 ctx1_iv_off;
1020     int err;
1021 
1022     /*
1023      * AES-CTR needs to load IV in CONTEXT1 reg
1024      * at an offset of 128 bits (16 bytes)
1025      * CONTEXT1[255:128] = IV
1026      */
1027     ctx1_iv_off = 16;
1028 
1029     err = aes_check_keylen(keylen);
1030     if (err)
1031         return err;
1032 
1033     return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1034 }
1035 
1036 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1037                     const u8 *key, unsigned int keylen)
1038 {
1039     if (keylen != CHACHA_KEY_SIZE)
1040         return -EINVAL;
1041 
1042     return skcipher_setkey(skcipher, key, keylen, 0);
1043 }
1044 
1045 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1046                    const u8 *key, unsigned int keylen)
1047 {
1048     return verify_skcipher_des_key(skcipher, key) ?:
1049            skcipher_setkey(skcipher, key, keylen, 0);
1050 }
1051 
1052 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1053                     const u8 *key, unsigned int keylen)
1054 {
1055     return verify_skcipher_des3_key(skcipher, key) ?:
1056            skcipher_setkey(skcipher, key, keylen, 0);
1057 }
1058 
1059 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1060                    unsigned int keylen)
1061 {
1062     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1063     struct device *dev = ctx->dev;
1064     struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1065     struct caam_flc *flc;
1066     u32 *desc;
1067     int err;
1068 
1069     err = xts_verify_key(skcipher, key, keylen);
1070     if (err) {
1071         dev_dbg(dev, "key size mismatch\n");
1072         return err;
1073     }
1074 
1075     if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
1076         ctx->xts_key_fallback = true;
1077 
1078     if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
1079         err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
1080         if (err)
1081             return err;
1082     }
1083 
1084     ctx->cdata.keylen = keylen;
1085     ctx->cdata.key_virt = key;
1086     ctx->cdata.key_inline = true;
1087 
1088     /* xts_skcipher_encrypt shared descriptor */
1089     flc = &ctx->flc[ENCRYPT];
1090     desc = flc->sh_desc;
1091     cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1092     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1093     dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1094                    sizeof(flc->flc) + desc_bytes(desc),
1095                    ctx->dir);
1096 
1097     /* xts_skcipher_decrypt shared descriptor */
1098     flc = &ctx->flc[DECRYPT];
1099     desc = flc->sh_desc;
1100     cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1101     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1102     dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1103                    sizeof(flc->flc) + desc_bytes(desc),
1104                    ctx->dir);
1105 
1106     return 0;
1107 }
1108 
1109 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1110 {
1111     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1112     struct caam_request *req_ctx = skcipher_request_ctx(req);
1113     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1114     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1115     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1116     struct device *dev = ctx->dev;
1117     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1118                GFP_KERNEL : GFP_ATOMIC;
1119     int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1120     struct skcipher_edesc *edesc;
1121     dma_addr_t iv_dma;
1122     u8 *iv;
1123     int ivsize = crypto_skcipher_ivsize(skcipher);
1124     int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1125     struct dpaa2_sg_entry *sg_table;
1126 
1127     src_nents = sg_nents_for_len(req->src, req->cryptlen);
1128     if (unlikely(src_nents < 0)) {
1129         dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1130             req->cryptlen);
1131         return ERR_PTR(src_nents);
1132     }
1133 
1134     if (unlikely(req->dst != req->src)) {
1135         dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1136         if (unlikely(dst_nents < 0)) {
1137             dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1138                 req->cryptlen);
1139             return ERR_PTR(dst_nents);
1140         }
1141 
1142         mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1143                           DMA_TO_DEVICE);
1144         if (unlikely(!mapped_src_nents)) {
1145             dev_err(dev, "unable to map source\n");
1146             return ERR_PTR(-ENOMEM);
1147         }
1148 
1149         mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1150                           DMA_FROM_DEVICE);
1151         if (unlikely(!mapped_dst_nents)) {
1152             dev_err(dev, "unable to map destination\n");
1153             dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1154             return ERR_PTR(-ENOMEM);
1155         }
1156     } else {
1157         mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1158                           DMA_BIDIRECTIONAL);
1159         if (unlikely(!mapped_src_nents)) {
1160             dev_err(dev, "unable to map source\n");
1161             return ERR_PTR(-ENOMEM);
1162         }
1163     }
1164 
1165     qm_sg_ents = 1 + mapped_src_nents;
1166     dst_sg_idx = qm_sg_ents;
1167 
1168     /*
1169      * Input, output HW S/G tables: [IV, src][dst, IV]
1170      * IV entries point to the same buffer
1171      * If src == dst, S/G entries are reused (S/G tables overlap)
1172      *
1173      * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1174      * the end of the table by allocating more S/G entries.
1175      */
1176     if (req->src != req->dst)
1177         qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1178     else
1179         qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1180 
1181     qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1182     if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1183              ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1184         dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1185             qm_sg_ents, ivsize);
1186         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1187                0, DMA_NONE, 0, 0);
1188         return ERR_PTR(-ENOMEM);
1189     }
1190 
1191     /* allocate space for base edesc, link tables and IV */
1192     edesc = qi_cache_zalloc(GFP_DMA | flags);
1193     if (unlikely(!edesc)) {
1194         dev_err(dev, "could not allocate extended descriptor\n");
1195         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1196                0, DMA_NONE, 0, 0);
1197         return ERR_PTR(-ENOMEM);
1198     }
1199 
1200     /* Make sure IV is located in a DMAable area */
1201     sg_table = &edesc->sgt[0];
1202     iv = (u8 *)(sg_table + qm_sg_ents);
1203     memcpy(iv, req->iv, ivsize);
1204 
1205     iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1206     if (dma_mapping_error(dev, iv_dma)) {
1207         dev_err(dev, "unable to map IV\n");
1208         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1209                0, DMA_NONE, 0, 0);
1210         qi_cache_free(edesc);
1211         return ERR_PTR(-ENOMEM);
1212     }
1213 
1214     edesc->src_nents = src_nents;
1215     edesc->dst_nents = dst_nents;
1216     edesc->iv_dma = iv_dma;
1217     edesc->qm_sg_bytes = qm_sg_bytes;
1218 
1219     dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1220     sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1221 
1222     if (req->src != req->dst)
1223         sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1224 
1225     dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1226              ivsize, 0);
1227 
1228     edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1229                       DMA_TO_DEVICE);
1230     if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1231         dev_err(dev, "unable to map S/G table\n");
1232         caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1233                iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1234         qi_cache_free(edesc);
1235         return ERR_PTR(-ENOMEM);
1236     }
1237 
1238     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1239     dpaa2_fl_set_final(in_fle, true);
1240     dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1241     dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1242 
1243     dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1244     dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1245 
1246     dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1247 
1248     if (req->src == req->dst)
1249         dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1250                   sizeof(*sg_table));
1251     else
1252         dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1253                   sizeof(*sg_table));
1254 
1255     return edesc;
1256 }
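
/*
 * Worked example (not part of the driver) of the single HW S/G table built
 * by skcipher_edesc_alloc() above for req->src != req->dst, assuming
 * mapped_src_nents = 2 and mapped_dst_nents = 2 (so dst_sg_idx = 3):
 *
 *   index 0           : IV                   <- in_fle points here
 *   index 1..2        : source entries
 *   index 3..4        : destination entries  <- out_fle points at index 3
 *   index 5           : IV (written back together with the last block)
 *   remaining entries : padding, so the HW's 4-at-a-time reads stay inside
 *                       the table
 *
 * in_fle is set to qm_sg_dma and out_fle to
 * qm_sg_dma + dst_sg_idx * sizeof(struct dpaa2_sg_entry).
 */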
1257 
1258 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1259                struct aead_request *req)
1260 {
1261     struct crypto_aead *aead = crypto_aead_reqtfm(req);
1262     int ivsize = crypto_aead_ivsize(aead);
1263 
1264     caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1265            edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1266            edesc->qm_sg_bytes);
1267     dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1268 }
1269 
1270 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1271                struct skcipher_request *req)
1272 {
1273     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1274     int ivsize = crypto_skcipher_ivsize(skcipher);
1275 
1276     caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1277            edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1278            edesc->qm_sg_bytes);
1279 }
1280 
1281 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1282 {
1283     struct crypto_async_request *areq = cbk_ctx;
1284     struct aead_request *req = container_of(areq, struct aead_request,
1285                         base);
1286     struct caam_request *req_ctx = to_caam_req(areq);
1287     struct aead_edesc *edesc = req_ctx->edesc;
1288     struct crypto_aead *aead = crypto_aead_reqtfm(req);
1289     struct caam_ctx *ctx = crypto_aead_ctx(aead);
1290     int ecode = 0;
1291 
1292     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1293 
1294     if (unlikely(status))
1295         ecode = caam_qi2_strstatus(ctx->dev, status);
1296 
1297     aead_unmap(ctx->dev, edesc, req);
1298     qi_cache_free(edesc);
1299     aead_request_complete(req, ecode);
1300 }
1301 
1302 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1303 {
1304     struct crypto_async_request *areq = cbk_ctx;
1305     struct aead_request *req = container_of(areq, struct aead_request,
1306                         base);
1307     struct caam_request *req_ctx = to_caam_req(areq);
1308     struct aead_edesc *edesc = req_ctx->edesc;
1309     struct crypto_aead *aead = crypto_aead_reqtfm(req);
1310     struct caam_ctx *ctx = crypto_aead_ctx(aead);
1311     int ecode = 0;
1312 
1313     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1314 
1315     if (unlikely(status))
1316         ecode = caam_qi2_strstatus(ctx->dev, status);
1317 
1318     aead_unmap(ctx->dev, edesc, req);
1319     qi_cache_free(edesc);
1320     aead_request_complete(req, ecode);
1321 }
1322 
1323 static int aead_encrypt(struct aead_request *req)
1324 {
1325     struct aead_edesc *edesc;
1326     struct crypto_aead *aead = crypto_aead_reqtfm(req);
1327     struct caam_ctx *ctx = crypto_aead_ctx(aead);
1328     struct caam_request *caam_req = aead_request_ctx(req);
1329     int ret;
1330 
1331     /* allocate extended descriptor */
1332     edesc = aead_edesc_alloc(req, true);
1333     if (IS_ERR(edesc))
1334         return PTR_ERR(edesc);
1335 
1336     caam_req->flc = &ctx->flc[ENCRYPT];
1337     caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1338     caam_req->cbk = aead_encrypt_done;
1339     caam_req->ctx = &req->base;
1340     caam_req->edesc = edesc;
1341     ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1342     if (ret != -EINPROGRESS &&
1343         !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1344         aead_unmap(ctx->dev, edesc, req);
1345         qi_cache_free(edesc);
1346     }
1347 
1348     return ret;
1349 }
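
/*
 * Minimal sketch (not part of the driver): the submission contract shared
 * by all request entry points in this file. dpaa2_caam_enqueue() returning
 * -EINPROGRESS means the completion callback now owns the edesc; -EBUSY
 * together with CRYPTO_TFM_REQ_MAY_BACKLOG means the request was backlogged
 * and will still complete asynchronously; anything else means the request
 * never reached the engine, so the caller cleans up synchronously. The
 * helper name is hypothetical.
 */
static int example_submit_aead(struct caam_ctx *ctx,
			       struct caam_request *caam_req,
			       struct aead_request *req,
			       struct aead_edesc *edesc)
{
	int ret = dpaa2_caam_enqueue(ctx->dev, caam_req);

	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);	/* not accepted */
		qi_cache_free(edesc);
	}
	return ret;
}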
1350 
1351 static int aead_decrypt(struct aead_request *req)
1352 {
1353     struct aead_edesc *edesc;
1354     struct crypto_aead *aead = crypto_aead_reqtfm(req);
1355     struct caam_ctx *ctx = crypto_aead_ctx(aead);
1356     struct caam_request *caam_req = aead_request_ctx(req);
1357     int ret;
1358 
1359     /* allocate extended descriptor */
1360     edesc = aead_edesc_alloc(req, false);
1361     if (IS_ERR(edesc))
1362         return PTR_ERR(edesc);
1363 
1364     caam_req->flc = &ctx->flc[DECRYPT];
1365     caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1366     caam_req->cbk = aead_decrypt_done;
1367     caam_req->ctx = &req->base;
1368     caam_req->edesc = edesc;
1369     ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1370     if (ret != -EINPROGRESS &&
1371         !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1372         aead_unmap(ctx->dev, edesc, req);
1373         qi_cache_free(edesc);
1374     }
1375 
1376     return ret;
1377 }
1378 
1379 static int ipsec_gcm_encrypt(struct aead_request *req)
1380 {
1381     return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1382 }
1383 
1384 static int ipsec_gcm_decrypt(struct aead_request *req)
1385 {
1386     return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1387 }
1388 
1389 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1390 {
1391     struct crypto_async_request *areq = cbk_ctx;
1392     struct skcipher_request *req = skcipher_request_cast(areq);
1393     struct caam_request *req_ctx = to_caam_req(areq);
1394     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1395     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1396     struct skcipher_edesc *edesc = req_ctx->edesc;
1397     int ecode = 0;
1398     int ivsize = crypto_skcipher_ivsize(skcipher);
1399 
1400     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1401 
1402     if (unlikely(status))
1403         ecode = caam_qi2_strstatus(ctx->dev, status);
1404 
1405     print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1406                  DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1407                  edesc->src_nents > 1 ? 100 : ivsize, 1);
1408     caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1409              DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1410              edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1411 
1412     skcipher_unmap(ctx->dev, edesc, req);
1413 
1414     /*
1415      * The crypto API expects us to set the IV (req->iv) to the last
1416      * ciphertext block (CBC mode) or last counter (CTR mode).
1417      * This is used e.g. by the CTS mode.
1418      */
1419     if (!ecode)
1420         memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1421                ivsize);
1422 
1423     qi_cache_free(edesc);
1424     skcipher_request_complete(req, ecode);
1425 }
1426 
1427 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1428 {
1429     struct crypto_async_request *areq = cbk_ctx;
1430     struct skcipher_request *req = skcipher_request_cast(areq);
1431     struct caam_request *req_ctx = to_caam_req(areq);
1432     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1433     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1434     struct skcipher_edesc *edesc = req_ctx->edesc;
1435     int ecode = 0;
1436     int ivsize = crypto_skcipher_ivsize(skcipher);
1437 
1438     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1439 
1440     if (unlikely(status))
1441         ecode = caam_qi2_strstatus(ctx->dev, status);
1442 
1443     print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1444                  DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1445                  edesc->src_nents > 1 ? 100 : ivsize, 1);
1446     caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1447              DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1448              edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1449 
1450     skcipher_unmap(ctx->dev, edesc, req);
1451 
1452     /*
1453      * The crypto API expects us to set the IV (req->iv) to the last
1454      * ciphertext block (CBC mode) or last counter (CTR mode).
1455      * This is used e.g. by the CTS mode.
1456      */
1457     if (!ecode)
1458         memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1459                ivsize);
1460 
1461     qi_cache_free(edesc);
1462     skcipher_request_complete(req, ecode);
1463 }
1464 
1465 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1466 {
1467     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1468     unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1469 
1470     return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1471 }
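
/*
 * Illustrative sketch (not part of the driver): the complete XTS software
 * fallback condition applied by skcipher_encrypt()/skcipher_decrypt() below.
 * The helper name is hypothetical; the logic mirrors the checks in those
 * functions - hardware older than era 9 cannot handle a non-zero upper half
 * of the sector index (IV), and unsupported key lengths always fall back.
 */
static bool example_xts_needs_fallback(struct caam_ctx *ctx,
				       struct dpaa2_caam_priv *priv,
				       struct skcipher_request *req)
{
	if (!ctx->fallback)
		return false;

	return (priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
	       ctx->xts_key_fallback;
}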
1472 
1473 static int skcipher_encrypt(struct skcipher_request *req)
1474 {
1475     struct skcipher_edesc *edesc;
1476     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1477     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1478     struct caam_request *caam_req = skcipher_request_ctx(req);
1479     struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1480     int ret;
1481 
1482     /*
1483      * XTS is expected to return an error even for input length = 0
1484      * Note that the case input length < block size will be caught during
1485      * HW offloading and return an error.
1486      */
1487     if (!req->cryptlen && !ctx->fallback)
1488         return 0;
1489 
1490     if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1491                   ctx->xts_key_fallback)) {
1492         skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1493         skcipher_request_set_callback(&caam_req->fallback_req,
1494                           req->base.flags,
1495                           req->base.complete,
1496                           req->base.data);
1497         skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1498                        req->dst, req->cryptlen, req->iv);
1499 
1500         return crypto_skcipher_encrypt(&caam_req->fallback_req);
1501     }
1502 
1503     /* allocate extended descriptor */
1504     edesc = skcipher_edesc_alloc(req);
1505     if (IS_ERR(edesc))
1506         return PTR_ERR(edesc);
1507 
1508     caam_req->flc = &ctx->flc[ENCRYPT];
1509     caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1510     caam_req->cbk = skcipher_encrypt_done;
1511     caam_req->ctx = &req->base;
1512     caam_req->edesc = edesc;
1513     ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1514     if (ret != -EINPROGRESS &&
1515         !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1516         skcipher_unmap(ctx->dev, edesc, req);
1517         qi_cache_free(edesc);
1518     }
1519 
1520     return ret;
1521 }
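
/*
 * Caller-side sketch (hypothetical, not part of this driver): the return
 * convention above is the usual asynchronous one - -EINPROGRESS means the
 * request was enqueued, while -EBUSY together with CRYPTO_TFM_REQ_MAY_BACKLOG
 * means it sits on the backlog but will still complete.  Anything else is a
 * final error (the extended descriptor has already been unmapped and freed).
 * crypto_wait_req() folds all of this into a synchronous call; the sketch
 * assumes the usual <crypto/skcipher.h> and <linux/crypto.h> declarations.
 */
static int skcipher_encrypt_sync_sketch(struct crypto_skcipher *tfm,
                                        struct scatterlist *src,
                                        struct scatterlist *dst,
                                        unsigned int len, u8 *iv)
{
    DECLARE_CRYPTO_WAIT(wait);
    struct skcipher_request *req;
    int err;

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req)
        return -ENOMEM;

    /* MAY_BACKLOG turns -EBUSY into "queued on the backlog", not a failure */
    skcipher_request_set_callback(req,
                                  CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
    skcipher_request_set_crypt(req, src, dst, len, iv);

    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

    skcipher_request_free(req);
    return err;
}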
1522 
1523 static int skcipher_decrypt(struct skcipher_request *req)
1524 {
1525     struct skcipher_edesc *edesc;
1526     struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1527     struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1528     struct caam_request *caam_req = skcipher_request_ctx(req);
1529     struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1530     int ret;
1531 
1532     /*
1533      * XTS is expected to return an error even for input length = 0, so a
1534      * zero-length request is only short-circuited when no fallback is set.
1535      * Inputs shorter than the block size are rejected by the HW offload.
1536      */
1537     if (!req->cryptlen && !ctx->fallback)
1538         return 0;
1539 
1540     if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1541                   ctx->xts_key_fallback)) {
1542         skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1543         skcipher_request_set_callback(&caam_req->fallback_req,
1544                           req->base.flags,
1545                           req->base.complete,
1546                           req->base.data);
1547         skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1548                        req->dst, req->cryptlen, req->iv);
1549 
1550         return crypto_skcipher_decrypt(&caam_req->fallback_req);
1551     }
1552 
1553     /* allocate extended descriptor */
1554     edesc = skcipher_edesc_alloc(req);
1555     if (IS_ERR(edesc))
1556         return PTR_ERR(edesc);
1557 
1558     caam_req->flc = &ctx->flc[DECRYPT];
1559     caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1560     caam_req->cbk = skcipher_decrypt_done;
1561     caam_req->ctx = &req->base;
1562     caam_req->edesc = edesc;
1563     ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1564     if (ret != -EINPROGRESS &&
1565         !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1566         skcipher_unmap(ctx->dev, edesc, req);
1567         qi_cache_free(edesc);
1568     }
1569 
1570     return ret;
1571 }
1572 
1573 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1574              bool uses_dkp)
1575 {
1576     dma_addr_t dma_addr;
1577     int i;
1578 
1579     /* copy descriptor header template value */
1580     ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1581     ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1582 
1583     ctx->dev = caam->dev;
1584     ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1585 
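    /*
     * ctx->flc[] and ctx->key[] sit back to back at the start of struct
     * caam_ctx, so the single mapping below (offsetof(struct caam_ctx,
     * flc_dma) bytes starting at ctx->flc) covers both; flc_dma[] and
     * key_dma are then derived from that one mapping by offset.
     */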
1586     dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1587                     offsetof(struct caam_ctx, flc_dma),
1588                     ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1589     if (dma_mapping_error(ctx->dev, dma_addr)) {
1590         dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1591         return -ENOMEM;
1592     }
1593 
1594     for (i = 0; i < NUM_OP; i++)
1595         ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1596     ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1597 
1598     return 0;
1599 }
1600 
1601 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1602 {
1603     struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1604     struct caam_skcipher_alg *caam_alg =
1605         container_of(alg, typeof(*caam_alg), skcipher);
1606     struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1607     u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1608     int ret = 0;
1609 
1610     if (alg_aai == OP_ALG_AAI_XTS) {
1611         const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1612         struct crypto_skcipher *fallback;
1613 
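        /*
         * Passing CRYPTO_ALG_NEED_FALLBACK in the mask excludes providers
         * that themselves declare CRYPTO_ALG_NEED_FALLBACK (including this
         * driver's own xts-aes-caam-qi2), so the allocation below picks a
         * self-contained xts(aes) implementation, typically the generic
         * software one.
         */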
1614         fallback = crypto_alloc_skcipher(tfm_name, 0,
1615                          CRYPTO_ALG_NEED_FALLBACK);
1616         if (IS_ERR(fallback)) {
1617             dev_err(caam_alg->caam.dev,
1618                 "Failed to allocate %s fallback: %ld\n",
1619                 tfm_name, PTR_ERR(fallback));
1620             return PTR_ERR(fallback);
1621         }
1622 
1623         ctx->fallback = fallback;
1624         crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1625                         crypto_skcipher_reqsize(fallback));
1626     } else {
1627         crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1628     }
1629 
1630     ret = caam_cra_init(ctx, &caam_alg->caam, false);
1631     if (ret && ctx->fallback)
1632         crypto_free_skcipher(ctx->fallback);
1633 
1634     return ret;
1635 }
1636 
1637 static int caam_cra_init_aead(struct crypto_aead *tfm)
1638 {
1639     struct aead_alg *alg = crypto_aead_alg(tfm);
1640     struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1641                               aead);
1642 
1643     crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1644     return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1645                  !caam_alg->caam.nodkp);
1646 }
1647 
1648 static void caam_exit_common(struct caam_ctx *ctx)
1649 {
1650     dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1651                    offsetof(struct caam_ctx, flc_dma), ctx->dir,
1652                    DMA_ATTR_SKIP_CPU_SYNC);
1653 }
1654 
1655 static void caam_cra_exit(struct crypto_skcipher *tfm)
1656 {
1657     struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1658 
1659     if (ctx->fallback)
1660         crypto_free_skcipher(ctx->fallback);
1661     caam_exit_common(ctx);
1662 }
1663 
1664 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1665 {
1666     caam_exit_common(crypto_aead_ctx(tfm));
1667 }
1668 
1669 static struct caam_skcipher_alg driver_algs[] = {
1670     {
1671         .skcipher = {
1672             .base = {
1673                 .cra_name = "cbc(aes)",
1674                 .cra_driver_name = "cbc-aes-caam-qi2",
1675                 .cra_blocksize = AES_BLOCK_SIZE,
1676             },
1677             .setkey = aes_skcipher_setkey,
1678             .encrypt = skcipher_encrypt,
1679             .decrypt = skcipher_decrypt,
1680             .min_keysize = AES_MIN_KEY_SIZE,
1681             .max_keysize = AES_MAX_KEY_SIZE,
1682             .ivsize = AES_BLOCK_SIZE,
1683         },
1684         .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1685     },
1686     {
1687         .skcipher = {
1688             .base = {
1689                 .cra_name = "cbc(des3_ede)",
1690                 .cra_driver_name = "cbc-3des-caam-qi2",
1691                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1692             },
1693             .setkey = des3_skcipher_setkey,
1694             .encrypt = skcipher_encrypt,
1695             .decrypt = skcipher_decrypt,
1696             .min_keysize = DES3_EDE_KEY_SIZE,
1697             .max_keysize = DES3_EDE_KEY_SIZE,
1698             .ivsize = DES3_EDE_BLOCK_SIZE,
1699         },
1700         .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1701     },
1702     {
1703         .skcipher = {
1704             .base = {
1705                 .cra_name = "cbc(des)",
1706                 .cra_driver_name = "cbc-des-caam-qi2",
1707                 .cra_blocksize = DES_BLOCK_SIZE,
1708             },
1709             .setkey = des_skcipher_setkey,
1710             .encrypt = skcipher_encrypt,
1711             .decrypt = skcipher_decrypt,
1712             .min_keysize = DES_KEY_SIZE,
1713             .max_keysize = DES_KEY_SIZE,
1714             .ivsize = DES_BLOCK_SIZE,
1715         },
1716         .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1717     },
1718     {
1719         .skcipher = {
1720             .base = {
1721                 .cra_name = "ctr(aes)",
1722                 .cra_driver_name = "ctr-aes-caam-qi2",
1723                 .cra_blocksize = 1,
1724             },
1725             .setkey = ctr_skcipher_setkey,
1726             .encrypt = skcipher_encrypt,
1727             .decrypt = skcipher_decrypt,
1728             .min_keysize = AES_MIN_KEY_SIZE,
1729             .max_keysize = AES_MAX_KEY_SIZE,
1730             .ivsize = AES_BLOCK_SIZE,
1731             .chunksize = AES_BLOCK_SIZE,
1732         },
1733         .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1734                     OP_ALG_AAI_CTR_MOD128,
1735     },
1736     {
1737         .skcipher = {
1738             .base = {
1739                 .cra_name = "rfc3686(ctr(aes))",
1740                 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1741                 .cra_blocksize = 1,
1742             },
1743             .setkey = rfc3686_skcipher_setkey,
1744             .encrypt = skcipher_encrypt,
1745             .decrypt = skcipher_decrypt,
1746             .min_keysize = AES_MIN_KEY_SIZE +
1747                        CTR_RFC3686_NONCE_SIZE,
1748             .max_keysize = AES_MAX_KEY_SIZE +
1749                        CTR_RFC3686_NONCE_SIZE,
1750             .ivsize = CTR_RFC3686_IV_SIZE,
1751             .chunksize = AES_BLOCK_SIZE,
1752         },
1753         .caam = {
1754             .class1_alg_type = OP_ALG_ALGSEL_AES |
1755                        OP_ALG_AAI_CTR_MOD128,
1756             .rfc3686 = true,
1757         },
1758     },
1759     {
1760         .skcipher = {
1761             .base = {
1762                 .cra_name = "xts(aes)",
1763                 .cra_driver_name = "xts-aes-caam-qi2",
1764                 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1765                 .cra_blocksize = AES_BLOCK_SIZE,
1766             },
1767             .setkey = xts_skcipher_setkey,
1768             .encrypt = skcipher_encrypt,
1769             .decrypt = skcipher_decrypt,
1770             .min_keysize = 2 * AES_MIN_KEY_SIZE,
1771             .max_keysize = 2 * AES_MAX_KEY_SIZE,
1772             .ivsize = AES_BLOCK_SIZE,
1773         },
1774         .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1775     },
1776     {
1777         .skcipher = {
1778             .base = {
1779                 .cra_name = "chacha20",
1780                 .cra_driver_name = "chacha20-caam-qi2",
1781                 .cra_blocksize = 1,
1782             },
1783             .setkey = chacha20_skcipher_setkey,
1784             .encrypt = skcipher_encrypt,
1785             .decrypt = skcipher_decrypt,
1786             .min_keysize = CHACHA_KEY_SIZE,
1787             .max_keysize = CHACHA_KEY_SIZE,
1788             .ivsize = CHACHA_IV_SIZE,
1789         },
1790         .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1791     },
1792 };
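
/*
 * Registration sketch from the consumer side (hypothetical, not part of this
 * driver): once the entries above are registered, asking for a generic
 * cra_name such as "cbc(aes)" returns whichever provider wins on
 * cra_priority; on DPAA2 hardware that is normally the caam-qi2 entry.  The
 * implementation actually selected can be read back from the tfm:
 */
static void report_cbc_aes_provider_sketch(void)
{
    struct crypto_skcipher *tfm;

    tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm)) {
        pr_err("cbc(aes) not available: %ld\n", PTR_ERR(tfm));
        return;
    }

    /* prints e.g. "cbc-aes-caam-qi2" when this driver was selected */
    pr_info("cbc(aes) provider: %s\n",
            crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

    crypto_free_skcipher(tfm);
}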
1793 
1794 static struct caam_aead_alg driver_aeads[] = {
1795     {
1796         .aead = {
1797             .base = {
1798                 .cra_name = "rfc4106(gcm(aes))",
1799                 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1800                 .cra_blocksize = 1,
1801             },
1802             .setkey = rfc4106_setkey,
1803             .setauthsize = rfc4106_setauthsize,
1804             .encrypt = ipsec_gcm_encrypt,
1805             .decrypt = ipsec_gcm_decrypt,
1806             .ivsize = 8,
1807             .maxauthsize = AES_BLOCK_SIZE,
1808         },
1809         .caam = {
1810             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1811             .nodkp = true,
1812         },
1813     },
1814     {
1815         .aead = {
1816             .base = {
1817                 .cra_name = "rfc4543(gcm(aes))",
1818                 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1819                 .cra_blocksize = 1,
1820             },
1821             .setkey = rfc4543_setkey,
1822             .setauthsize = rfc4543_setauthsize,
1823             .encrypt = ipsec_gcm_encrypt,
1824             .decrypt = ipsec_gcm_decrypt,
1825             .ivsize = 8,
1826             .maxauthsize = AES_BLOCK_SIZE,
1827         },
1828         .caam = {
1829             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1830             .nodkp = true,
1831         },
1832     },
1833     /* Galois Counter Mode */
1834     {
1835         .aead = {
1836             .base = {
1837                 .cra_name = "gcm(aes)",
1838                 .cra_driver_name = "gcm-aes-caam-qi2",
1839                 .cra_blocksize = 1,
1840             },
1841             .setkey = gcm_setkey,
1842             .setauthsize = gcm_setauthsize,
1843             .encrypt = aead_encrypt,
1844             .decrypt = aead_decrypt,
1845             .ivsize = 12,
1846             .maxauthsize = AES_BLOCK_SIZE,
1847         },
1848         .caam = {
1849             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1850             .nodkp = true,
1851         }
1852     },
1853     /* single-pass ipsec_esp descriptor */
1854     {
1855         .aead = {
1856             .base = {
1857                 .cra_name = "authenc(hmac(md5),cbc(aes))",
1858                 .cra_driver_name = "authenc-hmac-md5-"
1859                            "cbc-aes-caam-qi2",
1860                 .cra_blocksize = AES_BLOCK_SIZE,
1861             },
1862             .setkey = aead_setkey,
1863             .setauthsize = aead_setauthsize,
1864             .encrypt = aead_encrypt,
1865             .decrypt = aead_decrypt,
1866             .ivsize = AES_BLOCK_SIZE,
1867             .maxauthsize = MD5_DIGEST_SIZE,
1868         },
1869         .caam = {
1870             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1871             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1872                        OP_ALG_AAI_HMAC_PRECOMP,
1873         }
1874     },
1875     {
1876         .aead = {
1877             .base = {
1878                 .cra_name = "echainiv(authenc(hmac(md5),"
1879                         "cbc(aes)))",
1880                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1881                            "cbc-aes-caam-qi2",
1882                 .cra_blocksize = AES_BLOCK_SIZE,
1883             },
1884             .setkey = aead_setkey,
1885             .setauthsize = aead_setauthsize,
1886             .encrypt = aead_encrypt,
1887             .decrypt = aead_decrypt,
1888             .ivsize = AES_BLOCK_SIZE,
1889             .maxauthsize = MD5_DIGEST_SIZE,
1890         },
1891         .caam = {
1892             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1893             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1894                        OP_ALG_AAI_HMAC_PRECOMP,
1895             .geniv = true,
1896         }
1897     },
1898     {
1899         .aead = {
1900             .base = {
1901                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1902                 .cra_driver_name = "authenc-hmac-sha1-"
1903                            "cbc-aes-caam-qi2",
1904                 .cra_blocksize = AES_BLOCK_SIZE,
1905             },
1906             .setkey = aead_setkey,
1907             .setauthsize = aead_setauthsize,
1908             .encrypt = aead_encrypt,
1909             .decrypt = aead_decrypt,
1910             .ivsize = AES_BLOCK_SIZE,
1911             .maxauthsize = SHA1_DIGEST_SIZE,
1912         },
1913         .caam = {
1914             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1915             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1916                        OP_ALG_AAI_HMAC_PRECOMP,
1917         }
1918     },
1919     {
1920         .aead = {
1921             .base = {
1922                 .cra_name = "echainiv(authenc(hmac(sha1),"
1923                         "cbc(aes)))",
1924                 .cra_driver_name = "echainiv-authenc-"
1925                            "hmac-sha1-cbc-aes-caam-qi2",
1926                 .cra_blocksize = AES_BLOCK_SIZE,
1927             },
1928             .setkey = aead_setkey,
1929             .setauthsize = aead_setauthsize,
1930             .encrypt = aead_encrypt,
1931             .decrypt = aead_decrypt,
1932             .ivsize = AES_BLOCK_SIZE,
1933             .maxauthsize = SHA1_DIGEST_SIZE,
1934         },
1935         .caam = {
1936             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1937             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1938                        OP_ALG_AAI_HMAC_PRECOMP,
1939             .geniv = true,
1940         },
1941     },
1942     {
1943         .aead = {
1944             .base = {
1945                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1946                 .cra_driver_name = "authenc-hmac-sha224-"
1947                            "cbc-aes-caam-qi2",
1948                 .cra_blocksize = AES_BLOCK_SIZE,
1949             },
1950             .setkey = aead_setkey,
1951             .setauthsize = aead_setauthsize,
1952             .encrypt = aead_encrypt,
1953             .decrypt = aead_decrypt,
1954             .ivsize = AES_BLOCK_SIZE,
1955             .maxauthsize = SHA224_DIGEST_SIZE,
1956         },
1957         .caam = {
1958             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1959             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1960                        OP_ALG_AAI_HMAC_PRECOMP,
1961         }
1962     },
1963     {
1964         .aead = {
1965             .base = {
1966                 .cra_name = "echainiv(authenc(hmac(sha224),"
1967                         "cbc(aes)))",
1968                 .cra_driver_name = "echainiv-authenc-"
1969                            "hmac-sha224-cbc-aes-caam-qi2",
1970                 .cra_blocksize = AES_BLOCK_SIZE,
1971             },
1972             .setkey = aead_setkey,
1973             .setauthsize = aead_setauthsize,
1974             .encrypt = aead_encrypt,
1975             .decrypt = aead_decrypt,
1976             .ivsize = AES_BLOCK_SIZE,
1977             .maxauthsize = SHA224_DIGEST_SIZE,
1978         },
1979         .caam = {
1980             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1981             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1982                        OP_ALG_AAI_HMAC_PRECOMP,
1983             .geniv = true,
1984         }
1985     },
1986     {
1987         .aead = {
1988             .base = {
1989                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1990                 .cra_driver_name = "authenc-hmac-sha256-"
1991                            "cbc-aes-caam-qi2",
1992                 .cra_blocksize = AES_BLOCK_SIZE,
1993             },
1994             .setkey = aead_setkey,
1995             .setauthsize = aead_setauthsize,
1996             .encrypt = aead_encrypt,
1997             .decrypt = aead_decrypt,
1998             .ivsize = AES_BLOCK_SIZE,
1999             .maxauthsize = SHA256_DIGEST_SIZE,
2000         },
2001         .caam = {
2002             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2003             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2004                        OP_ALG_AAI_HMAC_PRECOMP,
2005         }
2006     },
2007     {
2008         .aead = {
2009             .base = {
2010                 .cra_name = "echainiv(authenc(hmac(sha256),"
2011                         "cbc(aes)))",
2012                 .cra_driver_name = "echainiv-authenc-"
2013                            "hmac-sha256-cbc-aes-"
2014                            "caam-qi2",
2015                 .cra_blocksize = AES_BLOCK_SIZE,
2016             },
2017             .setkey = aead_setkey,
2018             .setauthsize = aead_setauthsize,
2019             .encrypt = aead_encrypt,
2020             .decrypt = aead_decrypt,
2021             .ivsize = AES_BLOCK_SIZE,
2022             .maxauthsize = SHA256_DIGEST_SIZE,
2023         },
2024         .caam = {
2025             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2026             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2027                        OP_ALG_AAI_HMAC_PRECOMP,
2028             .geniv = true,
2029         }
2030     },
2031     {
2032         .aead = {
2033             .base = {
2034                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2035                 .cra_driver_name = "authenc-hmac-sha384-"
2036                            "cbc-aes-caam-qi2",
2037                 .cra_blocksize = AES_BLOCK_SIZE,
2038             },
2039             .setkey = aead_setkey,
2040             .setauthsize = aead_setauthsize,
2041             .encrypt = aead_encrypt,
2042             .decrypt = aead_decrypt,
2043             .ivsize = AES_BLOCK_SIZE,
2044             .maxauthsize = SHA384_DIGEST_SIZE,
2045         },
2046         .caam = {
2047             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2048             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2049                        OP_ALG_AAI_HMAC_PRECOMP,
2050         }
2051     },
2052     {
2053         .aead = {
2054             .base = {
2055                 .cra_name = "echainiv(authenc(hmac(sha384),"
2056                         "cbc(aes)))",
2057                 .cra_driver_name = "echainiv-authenc-"
2058                            "hmac-sha384-cbc-aes-"
2059                            "caam-qi2",
2060                 .cra_blocksize = AES_BLOCK_SIZE,
2061             },
2062             .setkey = aead_setkey,
2063             .setauthsize = aead_setauthsize,
2064             .encrypt = aead_encrypt,
2065             .decrypt = aead_decrypt,
2066             .ivsize = AES_BLOCK_SIZE,
2067             .maxauthsize = SHA384_DIGEST_SIZE,
2068         },
2069         .caam = {
2070             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2071             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2072                        OP_ALG_AAI_HMAC_PRECOMP,
2073             .geniv = true,
2074         }
2075     },
2076     {
2077         .aead = {
2078             .base = {
2079                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2080                 .cra_driver_name = "authenc-hmac-sha512-"
2081                            "cbc-aes-caam-qi2",
2082                 .cra_blocksize = AES_BLOCK_SIZE,
2083             },
2084             .setkey = aead_setkey,
2085             .setauthsize = aead_setauthsize,
2086             .encrypt = aead_encrypt,
2087             .decrypt = aead_decrypt,
2088             .ivsize = AES_BLOCK_SIZE,
2089             .maxauthsize = SHA512_DIGEST_SIZE,
2090         },
2091         .caam = {
2092             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2093             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094                        OP_ALG_AAI_HMAC_PRECOMP,
2095         }
2096     },
2097     {
2098         .aead = {
2099             .base = {
2100                 .cra_name = "echainiv(authenc(hmac(sha512),"
2101                         "cbc(aes)))",
2102                 .cra_driver_name = "echainiv-authenc-"
2103                            "hmac-sha512-cbc-aes-"
2104                            "caam-qi2",
2105                 .cra_blocksize = AES_BLOCK_SIZE,
2106             },
2107             .setkey = aead_setkey,
2108             .setauthsize = aead_setauthsize,
2109             .encrypt = aead_encrypt,
2110             .decrypt = aead_decrypt,
2111             .ivsize = AES_BLOCK_SIZE,
2112             .maxauthsize = SHA512_DIGEST_SIZE,
2113         },
2114         .caam = {
2115             .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2116             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2117                        OP_ALG_AAI_HMAC_PRECOMP,
2118             .geniv = true,
2119         }
2120     },
2121     {
2122         .aead = {
2123             .base = {
2124                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2125                 .cra_driver_name = "authenc-hmac-md5-"
2126                            "cbc-des3_ede-caam-qi2",
2127                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2128             },
2129             .setkey = des3_aead_setkey,
2130             .setauthsize = aead_setauthsize,
2131             .encrypt = aead_encrypt,
2132             .decrypt = aead_decrypt,
2133             .ivsize = DES3_EDE_BLOCK_SIZE,
2134             .maxauthsize = MD5_DIGEST_SIZE,
2135         },
2136         .caam = {
2137             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2138             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2139                        OP_ALG_AAI_HMAC_PRECOMP,
2140         }
2141     },
2142     {
2143         .aead = {
2144             .base = {
2145                 .cra_name = "echainiv(authenc(hmac(md5),"
2146                         "cbc(des3_ede)))",
2147                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2148                            "cbc-des3_ede-caam-qi2",
2149                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2150             },
2151             .setkey = des3_aead_setkey,
2152             .setauthsize = aead_setauthsize,
2153             .encrypt = aead_encrypt,
2154             .decrypt = aead_decrypt,
2155             .ivsize = DES3_EDE_BLOCK_SIZE,
2156             .maxauthsize = MD5_DIGEST_SIZE,
2157         },
2158         .caam = {
2159             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2160             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2161                        OP_ALG_AAI_HMAC_PRECOMP,
2162             .geniv = true,
2163         }
2164     },
2165     {
2166         .aead = {
2167             .base = {
2168                 .cra_name = "authenc(hmac(sha1),"
2169                         "cbc(des3_ede))",
2170                 .cra_driver_name = "authenc-hmac-sha1-"
2171                            "cbc-des3_ede-caam-qi2",
2172                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2173             },
2174             .setkey = des3_aead_setkey,
2175             .setauthsize = aead_setauthsize,
2176             .encrypt = aead_encrypt,
2177             .decrypt = aead_decrypt,
2178             .ivsize = DES3_EDE_BLOCK_SIZE,
2179             .maxauthsize = SHA1_DIGEST_SIZE,
2180         },
2181         .caam = {
2182             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2183             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2184                        OP_ALG_AAI_HMAC_PRECOMP,
2185         },
2186     },
2187     {
2188         .aead = {
2189             .base = {
2190                 .cra_name = "echainiv(authenc(hmac(sha1),"
2191                         "cbc(des3_ede)))",
2192                 .cra_driver_name = "echainiv-authenc-"
2193                            "hmac-sha1-"
2194                            "cbc-des3_ede-caam-qi2",
2195                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2196             },
2197             .setkey = des3_aead_setkey,
2198             .setauthsize = aead_setauthsize,
2199             .encrypt = aead_encrypt,
2200             .decrypt = aead_decrypt,
2201             .ivsize = DES3_EDE_BLOCK_SIZE,
2202             .maxauthsize = SHA1_DIGEST_SIZE,
2203         },
2204         .caam = {
2205             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2206             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2207                        OP_ALG_AAI_HMAC_PRECOMP,
2208             .geniv = true,
2209         }
2210     },
2211     {
2212         .aead = {
2213             .base = {
2214                 .cra_name = "authenc(hmac(sha224),"
2215                         "cbc(des3_ede))",
2216                 .cra_driver_name = "authenc-hmac-sha224-"
2217                            "cbc-des3_ede-caam-qi2",
2218                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2219             },
2220             .setkey = des3_aead_setkey,
2221             .setauthsize = aead_setauthsize,
2222             .encrypt = aead_encrypt,
2223             .decrypt = aead_decrypt,
2224             .ivsize = DES3_EDE_BLOCK_SIZE,
2225             .maxauthsize = SHA224_DIGEST_SIZE,
2226         },
2227         .caam = {
2228             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2229             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2230                        OP_ALG_AAI_HMAC_PRECOMP,
2231         },
2232     },
2233     {
2234         .aead = {
2235             .base = {
2236                 .cra_name = "echainiv(authenc(hmac(sha224),"
2237                         "cbc(des3_ede)))",
2238                 .cra_driver_name = "echainiv-authenc-"
2239                            "hmac-sha224-"
2240                            "cbc-des3_ede-caam-qi2",
2241                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2242             },
2243             .setkey = des3_aead_setkey,
2244             .setauthsize = aead_setauthsize,
2245             .encrypt = aead_encrypt,
2246             .decrypt = aead_decrypt,
2247             .ivsize = DES3_EDE_BLOCK_SIZE,
2248             .maxauthsize = SHA224_DIGEST_SIZE,
2249         },
2250         .caam = {
2251             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2252             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2253                        OP_ALG_AAI_HMAC_PRECOMP,
2254             .geniv = true,
2255         }
2256     },
2257     {
2258         .aead = {
2259             .base = {
2260                 .cra_name = "authenc(hmac(sha256),"
2261                         "cbc(des3_ede))",
2262                 .cra_driver_name = "authenc-hmac-sha256-"
2263                            "cbc-des3_ede-caam-qi2",
2264                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2265             },
2266             .setkey = des3_aead_setkey,
2267             .setauthsize = aead_setauthsize,
2268             .encrypt = aead_encrypt,
2269             .decrypt = aead_decrypt,
2270             .ivsize = DES3_EDE_BLOCK_SIZE,
2271             .maxauthsize = SHA256_DIGEST_SIZE,
2272         },
2273         .caam = {
2274             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2275             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2276                        OP_ALG_AAI_HMAC_PRECOMP,
2277         },
2278     },
2279     {
2280         .aead = {
2281             .base = {
2282                 .cra_name = "echainiv(authenc(hmac(sha256),"
2283                         "cbc(des3_ede)))",
2284                 .cra_driver_name = "echainiv-authenc-"
2285                            "hmac-sha256-"
2286                            "cbc-des3_ede-caam-qi2",
2287                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2288             },
2289             .setkey = des3_aead_setkey,
2290             .setauthsize = aead_setauthsize,
2291             .encrypt = aead_encrypt,
2292             .decrypt = aead_decrypt,
2293             .ivsize = DES3_EDE_BLOCK_SIZE,
2294             .maxauthsize = SHA256_DIGEST_SIZE,
2295         },
2296         .caam = {
2297             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2298             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2299                        OP_ALG_AAI_HMAC_PRECOMP,
2300             .geniv = true,
2301         }
2302     },
2303     {
2304         .aead = {
2305             .base = {
2306                 .cra_name = "authenc(hmac(sha384),"
2307                         "cbc(des3_ede))",
2308                 .cra_driver_name = "authenc-hmac-sha384-"
2309                            "cbc-des3_ede-caam-qi2",
2310                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311             },
2312             .setkey = des3_aead_setkey,
2313             .setauthsize = aead_setauthsize,
2314             .encrypt = aead_encrypt,
2315             .decrypt = aead_decrypt,
2316             .ivsize = DES3_EDE_BLOCK_SIZE,
2317             .maxauthsize = SHA384_DIGEST_SIZE,
2318         },
2319         .caam = {
2320             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2321             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2322                        OP_ALG_AAI_HMAC_PRECOMP,
2323         },
2324     },
2325     {
2326         .aead = {
2327             .base = {
2328                 .cra_name = "echainiv(authenc(hmac(sha384),"
2329                         "cbc(des3_ede)))",
2330                 .cra_driver_name = "echainiv-authenc-"
2331                            "hmac-sha384-"
2332                            "cbc-des3_ede-caam-qi2",
2333                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2334             },
2335             .setkey = des3_aead_setkey,
2336             .setauthsize = aead_setauthsize,
2337             .encrypt = aead_encrypt,
2338             .decrypt = aead_decrypt,
2339             .ivsize = DES3_EDE_BLOCK_SIZE,
2340             .maxauthsize = SHA384_DIGEST_SIZE,
2341         },
2342         .caam = {
2343             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2344             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2345                        OP_ALG_AAI_HMAC_PRECOMP,
2346             .geniv = true,
2347         }
2348     },
2349     {
2350         .aead = {
2351             .base = {
2352                 .cra_name = "authenc(hmac(sha512),"
2353                         "cbc(des3_ede))",
2354                 .cra_driver_name = "authenc-hmac-sha512-"
2355                            "cbc-des3_ede-caam-qi2",
2356                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2357             },
2358             .setkey = des3_aead_setkey,
2359             .setauthsize = aead_setauthsize,
2360             .encrypt = aead_encrypt,
2361             .decrypt = aead_decrypt,
2362             .ivsize = DES3_EDE_BLOCK_SIZE,
2363             .maxauthsize = SHA512_DIGEST_SIZE,
2364         },
2365         .caam = {
2366             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2367             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2368                        OP_ALG_AAI_HMAC_PRECOMP,
2369         },
2370     },
2371     {
2372         .aead = {
2373             .base = {
2374                 .cra_name = "echainiv(authenc(hmac(sha512),"
2375                         "cbc(des3_ede)))",
2376                 .cra_driver_name = "echainiv-authenc-"
2377                            "hmac-sha512-"
2378                            "cbc-des3_ede-caam-qi2",
2379                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2380             },
2381             .setkey = des3_aead_setkey,
2382             .setauthsize = aead_setauthsize,
2383             .encrypt = aead_encrypt,
2384             .decrypt = aead_decrypt,
2385             .ivsize = DES3_EDE_BLOCK_SIZE,
2386             .maxauthsize = SHA512_DIGEST_SIZE,
2387         },
2388         .caam = {
2389             .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2390             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2391                        OP_ALG_AAI_HMAC_PRECOMP,
2392             .geniv = true,
2393         }
2394     },
2395     {
2396         .aead = {
2397             .base = {
2398                 .cra_name = "authenc(hmac(md5),cbc(des))",
2399                 .cra_driver_name = "authenc-hmac-md5-"
2400                            "cbc-des-caam-qi2",
2401                 .cra_blocksize = DES_BLOCK_SIZE,
2402             },
2403             .setkey = aead_setkey,
2404             .setauthsize = aead_setauthsize,
2405             .encrypt = aead_encrypt,
2406             .decrypt = aead_decrypt,
2407             .ivsize = DES_BLOCK_SIZE,
2408             .maxauthsize = MD5_DIGEST_SIZE,
2409         },
2410         .caam = {
2411             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2412             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2413                        OP_ALG_AAI_HMAC_PRECOMP,
2414         },
2415     },
2416     {
2417         .aead = {
2418             .base = {
2419                 .cra_name = "echainiv(authenc(hmac(md5),"
2420                         "cbc(des)))",
2421                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2422                            "cbc-des-caam-qi2",
2423                 .cra_blocksize = DES_BLOCK_SIZE,
2424             },
2425             .setkey = aead_setkey,
2426             .setauthsize = aead_setauthsize,
2427             .encrypt = aead_encrypt,
2428             .decrypt = aead_decrypt,
2429             .ivsize = DES_BLOCK_SIZE,
2430             .maxauthsize = MD5_DIGEST_SIZE,
2431         },
2432         .caam = {
2433             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2434             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2435                        OP_ALG_AAI_HMAC_PRECOMP,
2436             .geniv = true,
2437         }
2438     },
2439     {
2440         .aead = {
2441             .base = {
2442                 .cra_name = "authenc(hmac(sha1),cbc(des))",
2443                 .cra_driver_name = "authenc-hmac-sha1-"
2444                            "cbc-des-caam-qi2",
2445                 .cra_blocksize = DES_BLOCK_SIZE,
2446             },
2447             .setkey = aead_setkey,
2448             .setauthsize = aead_setauthsize,
2449             .encrypt = aead_encrypt,
2450             .decrypt = aead_decrypt,
2451             .ivsize = DES_BLOCK_SIZE,
2452             .maxauthsize = SHA1_DIGEST_SIZE,
2453         },
2454         .caam = {
2455             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2456             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2457                        OP_ALG_AAI_HMAC_PRECOMP,
2458         },
2459     },
2460     {
2461         .aead = {
2462             .base = {
2463                 .cra_name = "echainiv(authenc(hmac(sha1),"
2464                         "cbc(des)))",
2465                 .cra_driver_name = "echainiv-authenc-"
2466                            "hmac-sha1-cbc-des-caam-qi2",
2467                 .cra_blocksize = DES_BLOCK_SIZE,
2468             },
2469             .setkey = aead_setkey,
2470             .setauthsize = aead_setauthsize,
2471             .encrypt = aead_encrypt,
2472             .decrypt = aead_decrypt,
2473             .ivsize = DES_BLOCK_SIZE,
2474             .maxauthsize = SHA1_DIGEST_SIZE,
2475         },
2476         .caam = {
2477             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2478             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2479                        OP_ALG_AAI_HMAC_PRECOMP,
2480             .geniv = true,
2481         }
2482     },
2483     {
2484         .aead = {
2485             .base = {
2486                 .cra_name = "authenc(hmac(sha224),cbc(des))",
2487                 .cra_driver_name = "authenc-hmac-sha224-"
2488                            "cbc-des-caam-qi2",
2489                 .cra_blocksize = DES_BLOCK_SIZE,
2490             },
2491             .setkey = aead_setkey,
2492             .setauthsize = aead_setauthsize,
2493             .encrypt = aead_encrypt,
2494             .decrypt = aead_decrypt,
2495             .ivsize = DES_BLOCK_SIZE,
2496             .maxauthsize = SHA224_DIGEST_SIZE,
2497         },
2498         .caam = {
2499             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2500             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2501                        OP_ALG_AAI_HMAC_PRECOMP,
2502         },
2503     },
2504     {
2505         .aead = {
2506             .base = {
2507                 .cra_name = "echainiv(authenc(hmac(sha224),"
2508                         "cbc(des)))",
2509                 .cra_driver_name = "echainiv-authenc-"
2510                            "hmac-sha224-cbc-des-"
2511                            "caam-qi2",
2512                 .cra_blocksize = DES_BLOCK_SIZE,
2513             },
2514             .setkey = aead_setkey,
2515             .setauthsize = aead_setauthsize,
2516             .encrypt = aead_encrypt,
2517             .decrypt = aead_decrypt,
2518             .ivsize = DES_BLOCK_SIZE,
2519             .maxauthsize = SHA224_DIGEST_SIZE,
2520         },
2521         .caam = {
2522             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2523             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2524                        OP_ALG_AAI_HMAC_PRECOMP,
2525             .geniv = true,
2526         }
2527     },
2528     {
2529         .aead = {
2530             .base = {
2531                 .cra_name = "authenc(hmac(sha256),cbc(des))",
2532                 .cra_driver_name = "authenc-hmac-sha256-"
2533                            "cbc-des-caam-qi2",
2534                 .cra_blocksize = DES_BLOCK_SIZE,
2535             },
2536             .setkey = aead_setkey,
2537             .setauthsize = aead_setauthsize,
2538             .encrypt = aead_encrypt,
2539             .decrypt = aead_decrypt,
2540             .ivsize = DES_BLOCK_SIZE,
2541             .maxauthsize = SHA256_DIGEST_SIZE,
2542         },
2543         .caam = {
2544             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2545             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2546                        OP_ALG_AAI_HMAC_PRECOMP,
2547         },
2548     },
2549     {
2550         .aead = {
2551             .base = {
2552                 .cra_name = "echainiv(authenc(hmac(sha256),"
2553                         "cbc(des)))",
2554                 .cra_driver_name = "echainiv-authenc-"
2555                            "hmac-sha256-cbc-des-"
2556                            "caam-qi2",
2557                 .cra_blocksize = DES_BLOCK_SIZE,
2558             },
2559             .setkey = aead_setkey,
2560             .setauthsize = aead_setauthsize,
2561             .encrypt = aead_encrypt,
2562             .decrypt = aead_decrypt,
2563             .ivsize = DES_BLOCK_SIZE,
2564             .maxauthsize = SHA256_DIGEST_SIZE,
2565         },
2566         .caam = {
2567             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2568             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2569                        OP_ALG_AAI_HMAC_PRECOMP,
2570             .geniv = true,
2571         },
2572     },
2573     {
2574         .aead = {
2575             .base = {
2576                 .cra_name = "authenc(hmac(sha384),cbc(des))",
2577                 .cra_driver_name = "authenc-hmac-sha384-"
2578                            "cbc-des-caam-qi2",
2579                 .cra_blocksize = DES_BLOCK_SIZE,
2580             },
2581             .setkey = aead_setkey,
2582             .setauthsize = aead_setauthsize,
2583             .encrypt = aead_encrypt,
2584             .decrypt = aead_decrypt,
2585             .ivsize = DES_BLOCK_SIZE,
2586             .maxauthsize = SHA384_DIGEST_SIZE,
2587         },
2588         .caam = {
2589             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2590             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2591                        OP_ALG_AAI_HMAC_PRECOMP,
2592         },
2593     },
2594     {
2595         .aead = {
2596             .base = {
2597                 .cra_name = "echainiv(authenc(hmac(sha384),"
2598                         "cbc(des)))",
2599                 .cra_driver_name = "echainiv-authenc-"
2600                            "hmac-sha384-cbc-des-"
2601                            "caam-qi2",
2602                 .cra_blocksize = DES_BLOCK_SIZE,
2603             },
2604             .setkey = aead_setkey,
2605             .setauthsize = aead_setauthsize,
2606             .encrypt = aead_encrypt,
2607             .decrypt = aead_decrypt,
2608             .ivsize = DES_BLOCK_SIZE,
2609             .maxauthsize = SHA384_DIGEST_SIZE,
2610         },
2611         .caam = {
2612             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2613             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2614                        OP_ALG_AAI_HMAC_PRECOMP,
2615             .geniv = true,
2616         }
2617     },
2618     {
2619         .aead = {
2620             .base = {
2621                 .cra_name = "authenc(hmac(sha512),cbc(des))",
2622                 .cra_driver_name = "authenc-hmac-sha512-"
2623                            "cbc-des-caam-qi2",
2624                 .cra_blocksize = DES_BLOCK_SIZE,
2625             },
2626             .setkey = aead_setkey,
2627             .setauthsize = aead_setauthsize,
2628             .encrypt = aead_encrypt,
2629             .decrypt = aead_decrypt,
2630             .ivsize = DES_BLOCK_SIZE,
2631             .maxauthsize = SHA512_DIGEST_SIZE,
2632         },
2633         .caam = {
2634             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2635             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2636                        OP_ALG_AAI_HMAC_PRECOMP,
2637         }
2638     },
2639     {
2640         .aead = {
2641             .base = {
2642                 .cra_name = "echainiv(authenc(hmac(sha512),"
2643                         "cbc(des)))",
2644                 .cra_driver_name = "echainiv-authenc-"
2645                            "hmac-sha512-cbc-des-"
2646                            "caam-qi2",
2647                 .cra_blocksize = DES_BLOCK_SIZE,
2648             },
2649             .setkey = aead_setkey,
2650             .setauthsize = aead_setauthsize,
2651             .encrypt = aead_encrypt,
2652             .decrypt = aead_decrypt,
2653             .ivsize = DES_BLOCK_SIZE,
2654             .maxauthsize = SHA512_DIGEST_SIZE,
2655         },
2656         .caam = {
2657             .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2658             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2659                        OP_ALG_AAI_HMAC_PRECOMP,
2660             .geniv = true,
2661         }
2662     },
2663     {
2664         .aead = {
2665             .base = {
2666                 .cra_name = "authenc(hmac(md5),"
2667                         "rfc3686(ctr(aes)))",
2668                 .cra_driver_name = "authenc-hmac-md5-"
2669                            "rfc3686-ctr-aes-caam-qi2",
2670                 .cra_blocksize = 1,
2671             },
2672             .setkey = aead_setkey,
2673             .setauthsize = aead_setauthsize,
2674             .encrypt = aead_encrypt,
2675             .decrypt = aead_decrypt,
2676             .ivsize = CTR_RFC3686_IV_SIZE,
2677             .maxauthsize = MD5_DIGEST_SIZE,
2678         },
2679         .caam = {
2680             .class1_alg_type = OP_ALG_ALGSEL_AES |
2681                        OP_ALG_AAI_CTR_MOD128,
2682             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2683                        OP_ALG_AAI_HMAC_PRECOMP,
2684             .rfc3686 = true,
2685         },
2686     },
2687     {
2688         .aead = {
2689             .base = {
2690                 .cra_name = "seqiv(authenc("
2691                         "hmac(md5),rfc3686(ctr(aes))))",
2692                 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2693                            "rfc3686-ctr-aes-caam-qi2",
2694                 .cra_blocksize = 1,
2695             },
2696             .setkey = aead_setkey,
2697             .setauthsize = aead_setauthsize,
2698             .encrypt = aead_encrypt,
2699             .decrypt = aead_decrypt,
2700             .ivsize = CTR_RFC3686_IV_SIZE,
2701             .maxauthsize = MD5_DIGEST_SIZE,
2702         },
2703         .caam = {
2704             .class1_alg_type = OP_ALG_ALGSEL_AES |
2705                        OP_ALG_AAI_CTR_MOD128,
2706             .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2707                        OP_ALG_AAI_HMAC_PRECOMP,
2708             .rfc3686 = true,
2709             .geniv = true,
2710         },
2711     },
2712     {
2713         .aead = {
2714             .base = {
2715                 .cra_name = "authenc(hmac(sha1),"
2716                         "rfc3686(ctr(aes)))",
2717                 .cra_driver_name = "authenc-hmac-sha1-"
2718                            "rfc3686-ctr-aes-caam-qi2",
2719                 .cra_blocksize = 1,
2720             },
2721             .setkey = aead_setkey,
2722             .setauthsize = aead_setauthsize,
2723             .encrypt = aead_encrypt,
2724             .decrypt = aead_decrypt,
2725             .ivsize = CTR_RFC3686_IV_SIZE,
2726             .maxauthsize = SHA1_DIGEST_SIZE,
2727         },
2728         .caam = {
2729             .class1_alg_type = OP_ALG_ALGSEL_AES |
2730                        OP_ALG_AAI_CTR_MOD128,
2731             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2732                        OP_ALG_AAI_HMAC_PRECOMP,
2733             .rfc3686 = true,
2734         },
2735     },
2736     {
2737         .aead = {
2738             .base = {
2739                 .cra_name = "seqiv(authenc("
2740                         "hmac(sha1),rfc3686(ctr(aes))))",
2741                 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2742                            "rfc3686-ctr-aes-caam-qi2",
2743                 .cra_blocksize = 1,
2744             },
2745             .setkey = aead_setkey,
2746             .setauthsize = aead_setauthsize,
2747             .encrypt = aead_encrypt,
2748             .decrypt = aead_decrypt,
2749             .ivsize = CTR_RFC3686_IV_SIZE,
2750             .maxauthsize = SHA1_DIGEST_SIZE,
2751         },
2752         .caam = {
2753             .class1_alg_type = OP_ALG_ALGSEL_AES |
2754                        OP_ALG_AAI_CTR_MOD128,
2755             .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2756                        OP_ALG_AAI_HMAC_PRECOMP,
2757             .rfc3686 = true,
2758             .geniv = true,
2759         },
2760     },
2761     {
2762         .aead = {
2763             .base = {
2764                 .cra_name = "authenc(hmac(sha224),"
2765                         "rfc3686(ctr(aes)))",
2766                 .cra_driver_name = "authenc-hmac-sha224-"
2767                            "rfc3686-ctr-aes-caam-qi2",
2768                 .cra_blocksize = 1,
2769             },
2770             .setkey = aead_setkey,
2771             .setauthsize = aead_setauthsize,
2772             .encrypt = aead_encrypt,
2773             .decrypt = aead_decrypt,
2774             .ivsize = CTR_RFC3686_IV_SIZE,
2775             .maxauthsize = SHA224_DIGEST_SIZE,
2776         },
2777         .caam = {
2778             .class1_alg_type = OP_ALG_ALGSEL_AES |
2779                        OP_ALG_AAI_CTR_MOD128,
2780             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2781                        OP_ALG_AAI_HMAC_PRECOMP,
2782             .rfc3686 = true,
2783         },
2784     },
2785     {
2786         .aead = {
2787             .base = {
2788                 .cra_name = "seqiv(authenc("
2789                         "hmac(sha224),rfc3686(ctr(aes))))",
2790                 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2791                            "rfc3686-ctr-aes-caam-qi2",
2792                 .cra_blocksize = 1,
2793             },
2794             .setkey = aead_setkey,
2795             .setauthsize = aead_setauthsize,
2796             .encrypt = aead_encrypt,
2797             .decrypt = aead_decrypt,
2798             .ivsize = CTR_RFC3686_IV_SIZE,
2799             .maxauthsize = SHA224_DIGEST_SIZE,
2800         },
2801         .caam = {
2802             .class1_alg_type = OP_ALG_ALGSEL_AES |
2803                        OP_ALG_AAI_CTR_MOD128,
2804             .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2805                        OP_ALG_AAI_HMAC_PRECOMP,
2806             .rfc3686 = true,
2807             .geniv = true,
2808         },
2809     },
2810     {
2811         .aead = {
2812             .base = {
2813                 .cra_name = "authenc(hmac(sha256),"
2814                         "rfc3686(ctr(aes)))",
2815                 .cra_driver_name = "authenc-hmac-sha256-"
2816                            "rfc3686-ctr-aes-caam-qi2",
2817                 .cra_blocksize = 1,
2818             },
2819             .setkey = aead_setkey,
2820             .setauthsize = aead_setauthsize,
2821             .encrypt = aead_encrypt,
2822             .decrypt = aead_decrypt,
2823             .ivsize = CTR_RFC3686_IV_SIZE,
2824             .maxauthsize = SHA256_DIGEST_SIZE,
2825         },
2826         .caam = {
2827             .class1_alg_type = OP_ALG_ALGSEL_AES |
2828                        OP_ALG_AAI_CTR_MOD128,
2829             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2830                        OP_ALG_AAI_HMAC_PRECOMP,
2831             .rfc3686 = true,
2832         },
2833     },
2834     {
2835         .aead = {
2836             .base = {
2837                 .cra_name = "seqiv(authenc(hmac(sha256),"
2838                         "rfc3686(ctr(aes))))",
2839                 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2840                            "rfc3686-ctr-aes-caam-qi2",
2841                 .cra_blocksize = 1,
2842             },
2843             .setkey = aead_setkey,
2844             .setauthsize = aead_setauthsize,
2845             .encrypt = aead_encrypt,
2846             .decrypt = aead_decrypt,
2847             .ivsize = CTR_RFC3686_IV_SIZE,
2848             .maxauthsize = SHA256_DIGEST_SIZE,
2849         },
2850         .caam = {
2851             .class1_alg_type = OP_ALG_ALGSEL_AES |
2852                        OP_ALG_AAI_CTR_MOD128,
2853             .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2854                        OP_ALG_AAI_HMAC_PRECOMP,
2855             .rfc3686 = true,
2856             .geniv = true,
2857         },
2858     },
2859     {
2860         .aead = {
2861             .base = {
2862                 .cra_name = "authenc(hmac(sha384),"
2863                         "rfc3686(ctr(aes)))",
2864                 .cra_driver_name = "authenc-hmac-sha384-"
2865                            "rfc3686-ctr-aes-caam-qi2",
2866                 .cra_blocksize = 1,
2867             },
2868             .setkey = aead_setkey,
2869             .setauthsize = aead_setauthsize,
2870             .encrypt = aead_encrypt,
2871             .decrypt = aead_decrypt,
2872             .ivsize = CTR_RFC3686_IV_SIZE,
2873             .maxauthsize = SHA384_DIGEST_SIZE,
2874         },
2875         .caam = {
2876             .class1_alg_type = OP_ALG_ALGSEL_AES |
2877                        OP_ALG_AAI_CTR_MOD128,
2878             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2879                        OP_ALG_AAI_HMAC_PRECOMP,
2880             .rfc3686 = true,
2881         },
2882     },
2883     {
2884         .aead = {
2885             .base = {
2886                 .cra_name = "seqiv(authenc(hmac(sha384),"
2887                         "rfc3686(ctr(aes))))",
2888                 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2889                            "rfc3686-ctr-aes-caam-qi2",
2890                 .cra_blocksize = 1,
2891             },
2892             .setkey = aead_setkey,
2893             .setauthsize = aead_setauthsize,
2894             .encrypt = aead_encrypt,
2895             .decrypt = aead_decrypt,
2896             .ivsize = CTR_RFC3686_IV_SIZE,
2897             .maxauthsize = SHA384_DIGEST_SIZE,
2898         },
2899         .caam = {
2900             .class1_alg_type = OP_ALG_ALGSEL_AES |
2901                        OP_ALG_AAI_CTR_MOD128,
2902             .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2903                        OP_ALG_AAI_HMAC_PRECOMP,
2904             .rfc3686 = true,
2905             .geniv = true,
2906         },
2907     },
2908     {
2909         .aead = {
2910             .base = {
2911                 .cra_name = "rfc7539(chacha20,poly1305)",
2912                 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2913                            "caam-qi2",
2914                 .cra_blocksize = 1,
2915             },
2916             .setkey = chachapoly_setkey,
2917             .setauthsize = chachapoly_setauthsize,
2918             .encrypt = aead_encrypt,
2919             .decrypt = aead_decrypt,
2920             .ivsize = CHACHAPOLY_IV_SIZE,
2921             .maxauthsize = POLY1305_DIGEST_SIZE,
2922         },
2923         .caam = {
2924             .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2925                        OP_ALG_AAI_AEAD,
2926             .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2927                        OP_ALG_AAI_AEAD,
2928             .nodkp = true,
2929         },
2930     },
2931     {
2932         .aead = {
2933             .base = {
2934                 .cra_name = "rfc7539esp(chacha20,poly1305)",
2935                 .cra_driver_name = "rfc7539esp-chacha20-"
2936                            "poly1305-caam-qi2",
2937                 .cra_blocksize = 1,
2938             },
2939             .setkey = chachapoly_setkey,
2940             .setauthsize = chachapoly_setauthsize,
2941             .encrypt = aead_encrypt,
2942             .decrypt = aead_decrypt,
2943             .ivsize = 8,
2944             .maxauthsize = POLY1305_DIGEST_SIZE,
2945         },
2946         .caam = {
2947             .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2948                        OP_ALG_AAI_AEAD,
2949             .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2950                        OP_ALG_AAI_AEAD,
2951             .nodkp = true,
2952         },
2953     },
2954     {
2955         .aead = {
2956             .base = {
2957                 .cra_name = "authenc(hmac(sha512),"
2958                         "rfc3686(ctr(aes)))",
2959                 .cra_driver_name = "authenc-hmac-sha512-"
2960                            "rfc3686-ctr-aes-caam-qi2",
2961                 .cra_blocksize = 1,
2962             },
2963             .setkey = aead_setkey,
2964             .setauthsize = aead_setauthsize,
2965             .encrypt = aead_encrypt,
2966             .decrypt = aead_decrypt,
2967             .ivsize = CTR_RFC3686_IV_SIZE,
2968             .maxauthsize = SHA512_DIGEST_SIZE,
2969         },
2970         .caam = {
2971             .class1_alg_type = OP_ALG_ALGSEL_AES |
2972                        OP_ALG_AAI_CTR_MOD128,
2973             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2974                        OP_ALG_AAI_HMAC_PRECOMP,
2975             .rfc3686 = true,
2976         },
2977     },
2978     {
2979         .aead = {
2980             .base = {
2981                 .cra_name = "seqiv(authenc(hmac(sha512),"
2982                         "rfc3686(ctr(aes))))",
2983                 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2984                            "rfc3686-ctr-aes-caam-qi2",
2985                 .cra_blocksize = 1,
2986             },
2987             .setkey = aead_setkey,
2988             .setauthsize = aead_setauthsize,
2989             .encrypt = aead_encrypt,
2990             .decrypt = aead_decrypt,
2991             .ivsize = CTR_RFC3686_IV_SIZE,
2992             .maxauthsize = SHA512_DIGEST_SIZE,
2993         },
2994         .caam = {
2995             .class1_alg_type = OP_ALG_ALGSEL_AES |
2996                        OP_ALG_AAI_CTR_MOD128,
2997             .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2998                        OP_ALG_AAI_HMAC_PRECOMP,
2999             .rfc3686 = true,
3000             .geniv = true,
3001         },
3002     },
3003 };
3004 
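     /*
      * Fill in the crypto_alg fields shared by every skcipher/aead algorithm
      * exposed by this driver (module, priority, context size, ASYNC /
      * ALLOCATES_MEMORY / KERN_DRIVER_ONLY flags) and hook up the
      * per-transform init/exit callbacks.
      */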
3005 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3006 {
3007     struct skcipher_alg *alg = &t_alg->skcipher;
3008 
3009     alg->base.cra_module = THIS_MODULE;
3010     alg->base.cra_priority = CAAM_CRA_PRIORITY;
3011     alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3012     alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3013                   CRYPTO_ALG_KERN_DRIVER_ONLY);
3014 
3015     alg->init = caam_cra_init_skcipher;
3016     alg->exit = caam_cra_exit;
3017 }
3018 
3019 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3020 {
3021     struct aead_alg *alg = &t_alg->aead;
3022 
3023     alg->base.cra_module = THIS_MODULE;
3024     alg->base.cra_priority = CAAM_CRA_PRIORITY;
3025     alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3026     alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3027                   CRYPTO_ALG_KERN_DRIVER_ONLY;
3028 
3029     alg->init = caam_cra_init_aead;
3030     alg->exit = caam_cra_exit_aead;
3031 }
3032 
3033 /* max hash key is max split key size */
3034 #define CAAM_MAX_HASH_KEY_SIZE      (SHA512_DIGEST_SIZE * 2)
3035 
3036 #define CAAM_MAX_HASH_BLOCK_SIZE    SHA512_BLOCK_SIZE
3037 
3038 /* caam context size for hashes: running digest + 8-byte message length */
3039 #define HASH_MSG_LEN            8
3040 #define MAX_CTX_LEN         (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3041 
3042 enum hash_optype {
3043     UPDATE = 0,
3044     UPDATE_FIRST,
3045     FINALIZE,
3046     DIGEST,
3047     HASH_NUM_OP
3048 };
3049 
3050 /**
3051  * struct caam_hash_ctx - ahash per-session context
3052  * @flc: Flow Contexts array
3053  * @key: authentication key
3054  * @flc_dma: I/O virtual addresses of the Flow Contexts
3055  * @dev: dpseci device
3056  * @ctx_len: size of Context Register
3057  * @adata: hashing algorithm details
3058  */
3059 struct caam_hash_ctx {
3060     struct caam_flc flc[HASH_NUM_OP];
3061     u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3062     dma_addr_t flc_dma[HASH_NUM_OP];
3063     struct device *dev;
3064     int ctx_len;
3065     struct alginfo adata;
3066 };
3067 
3068 /* ahash state */
3069 struct caam_hash_state {
3070     struct caam_request caam_req;
3071     dma_addr_t buf_dma;
3072     dma_addr_t ctx_dma;
3073     int ctx_dma_len;
3074     u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3075     int buflen;
3076     int next_buflen;
3077     u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3078     int (*update)(struct ahash_request *req);
3079     int (*final)(struct ahash_request *req);
3080     int (*finup)(struct ahash_request *req);
3081 };
3082 
3083 struct caam_export_state {
3084     u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3085     u8 caam_ctx[MAX_CTX_LEN];
3086     int buflen;
3087     int (*update)(struct ahash_request *req);
3088     int (*final)(struct ahash_request *req);
3089     int (*finup)(struct ahash_request *req);
3090 };
3091 
3092 /* Map current buffer in state (if length > 0) and put it in link table */
3093 static inline int buf_map_to_qm_sg(struct device *dev,
3094                    struct dpaa2_sg_entry *qm_sg,
3095                    struct caam_hash_state *state)
3096 {
3097     int buflen = state->buflen;
3098 
3099     if (!buflen)
3100         return 0;
3101 
3102     state->buf_dma = dma_map_single(dev, state->buf, buflen,
3103                     DMA_TO_DEVICE);
3104     if (dma_mapping_error(dev, state->buf_dma)) {
3105         dev_err(dev, "unable to map buf\n");
3106         state->buf_dma = 0;
3107         return -ENOMEM;
3108     }
3109 
3110     dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3111 
3112     return 0;
3113 }
3114 
3115 /* Map state->caam_ctx and add it to the link table */
3116 static inline int ctx_map_to_qm_sg(struct device *dev,
3117                    struct caam_hash_state *state, int ctx_len,
3118                    struct dpaa2_sg_entry *qm_sg, u32 flag)
3119 {
3120     state->ctx_dma_len = ctx_len;
3121     state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3122     if (dma_mapping_error(dev, state->ctx_dma)) {
3123         dev_err(dev, "unable to map ctx\n");
3124         state->ctx_dma = 0;
3125         return -ENOMEM;
3126     }
3127 
3128     dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3129 
3130     return 0;
3131 }
3132 
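     /*
      * (Re)build the four ahash shared descriptors - UPDATE, UPDATE_FIRST,
      * FINALIZE and DIGEST - from the current ctx->adata settings and sync
      * each flow context to the device.
      */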
3133 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3134 {
3135     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3136     int digestsize = crypto_ahash_digestsize(ahash);
3137     struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3138     struct caam_flc *flc;
3139     u32 *desc;
3140 
3141     /* ahash_update shared descriptor */
3142     flc = &ctx->flc[UPDATE];
3143     desc = flc->sh_desc;
3144     cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3145               ctx->ctx_len, true, priv->sec_attr.era);
3146     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3147     dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3148                    desc_bytes(desc), DMA_BIDIRECTIONAL);
3149     print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3150                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3151                  1);
3152 
3153     /* ahash_update_first shared descriptor */
3154     flc = &ctx->flc[UPDATE_FIRST];
3155     desc = flc->sh_desc;
3156     cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3157               ctx->ctx_len, false, priv->sec_attr.era);
3158     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3159     dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3160                    desc_bytes(desc), DMA_BIDIRECTIONAL);
3161     print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3162                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3163                  1);
3164 
3165     /* ahash_final shared descriptor */
3166     flc = &ctx->flc[FINALIZE];
3167     desc = flc->sh_desc;
3168     cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3169               ctx->ctx_len, true, priv->sec_attr.era);
3170     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3171     dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3172                    desc_bytes(desc), DMA_BIDIRECTIONAL);
3173     print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3174                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3175                  1);
3176 
3177     /* ahash_digest shared descriptor */
3178     flc = &ctx->flc[DIGEST];
3179     desc = flc->sh_desc;
3180     cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3181               ctx->ctx_len, false, priv->sec_attr.era);
3182     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3183     dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3184                    desc_bytes(desc), DMA_BIDIRECTIONAL);
3185     print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3186                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3187                  1);
3188 
3189     return 0;
3190 }
3191 
3192 struct split_key_sh_result {
3193     struct completion completion;
3194     int err;
3195     struct device *dev;
3196 };
3197 
3198 static void split_key_sh_done(void *cbk_ctx, u32 err)
3199 {
3200     struct split_key_sh_result *res = cbk_ctx;
3201 
3202     dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3203 
3204     res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3205     complete(&res->completion);
3206 }
3207 
3208 /* Digest the key down to digestsize bytes when it is too large to use directly */
3209 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3210                u32 digestsize)
3211 {
3212     struct caam_request *req_ctx;
3213     u32 *desc;
3214     struct split_key_sh_result result;
3215     dma_addr_t key_dma;
3216     struct caam_flc *flc;
3217     dma_addr_t flc_dma;
3218     int ret = -ENOMEM;
3219     struct dpaa2_fl_entry *in_fle, *out_fle;
3220 
3221     req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3222     if (!req_ctx)
3223         return -ENOMEM;
3224 
3225     in_fle = &req_ctx->fd_flt[1];
3226     out_fle = &req_ctx->fd_flt[0];
3227 
3228     flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3229     if (!flc)
3230         goto err_flc;
3231 
3232     key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3233     if (dma_mapping_error(ctx->dev, key_dma)) {
3234         dev_err(ctx->dev, "unable to map key memory\n");
3235         goto err_key_dma;
3236     }
3237 
3238     desc = flc->sh_desc;
3239 
3240     init_sh_desc(desc, 0);
3241 
3242     /* descriptor to perform unkeyed hash on key_in */
3243     append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3244              OP_ALG_AS_INITFINAL);
3245     append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3246                  FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3247     append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3248              LDST_SRCDST_BYTE_CONTEXT);
3249 
3250     flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3251     flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3252                  desc_bytes(desc), DMA_TO_DEVICE);
3253     if (dma_mapping_error(ctx->dev, flc_dma)) {
3254         dev_err(ctx->dev, "unable to map shared descriptor\n");
3255         goto err_flc_dma;
3256     }
3257 
3258     dpaa2_fl_set_final(in_fle, true);
3259     dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3260     dpaa2_fl_set_addr(in_fle, key_dma);
3261     dpaa2_fl_set_len(in_fle, *keylen);
3262     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3263     dpaa2_fl_set_addr(out_fle, key_dma);
3264     dpaa2_fl_set_len(out_fle, digestsize);
3265 
3266     print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3267                  DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3268     print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3269                  DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3270                  1);
3271 
3272     result.err = 0;
3273     init_completion(&result.completion);
3274     result.dev = ctx->dev;
3275 
3276     req_ctx->flc = flc;
3277     req_ctx->flc_dma = flc_dma;
3278     req_ctx->cbk = split_key_sh_done;
3279     req_ctx->ctx = &result;
3280 
3281     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3282     if (ret == -EINPROGRESS) {
3283         /* in progress */
3284         wait_for_completion(&result.completion);
3285         ret = result.err;
3286         print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3287                      DUMP_PREFIX_ADDRESS, 16, 4, key,
3288                      digestsize, 1);
3289     }
3290 
3291     dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3292              DMA_TO_DEVICE);
3293 err_flc_dma:
3294     dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3295 err_key_dma:
3296     kfree(flc);
3297 err_flc:
3298     kfree(req_ctx);
3299 
3300     *keylen = digestsize;
3301 
3302     return ret;
3303 }
3304 
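     /*
      * Set the HMAC key: keys longer than the block size are first condensed
      * by hash_digest_key(), the key is then described in ctx->adata (with an
      * extra DMA-synced copy in ctx->key for the DKP<ptr,imm> case) and the
      * shared descriptors are rebuilt.
      */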
3305 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3306             unsigned int keylen)
3307 {
3308     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3309     unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3310     unsigned int digestsize = crypto_ahash_digestsize(ahash);
3311     int ret;
3312     u8 *hashed_key = NULL;
3313 
3314     dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3315 
3316     if (keylen > blocksize) {
3317         hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3318         if (!hashed_key)
3319             return -ENOMEM;
3320         ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3321         if (ret)
3322             goto bad_free_key;
3323         key = hashed_key;
3324     }
3325 
3326     ctx->adata.keylen = keylen;
3327     ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3328                           OP_ALG_ALGSEL_MASK);
3329     if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3330         goto bad_free_key;
3331 
3332     ctx->adata.key_virt = key;
3333     ctx->adata.key_inline = true;
3334 
3335     /*
3336      * In case |user key| > |derived key|, using DKP<imm,imm> would result
3337      * in invalid opcodes (last bytes of user key) in the resulting
3338      * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3339      * addresses are needed.
3340      */
3341     if (keylen > ctx->adata.keylen_pad) {
3342         memcpy(ctx->key, key, keylen);
3343         dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3344                        ctx->adata.keylen_pad,
3345                        DMA_TO_DEVICE);
3346     }
3347 
3348     ret = ahash_set_sh_desc(ahash);
3349     kfree(hashed_key);
3350     return ret;
3351 bad_free_key:
3352     kfree(hashed_key);
3353     return -EINVAL;
3354 }
3355 
3356 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3357                    struct ahash_request *req)
3358 {
3359     struct caam_hash_state *state = ahash_request_ctx(req);
3360 
3361     if (edesc->src_nents)
3362         dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3363 
3364     if (edesc->qm_sg_bytes)
3365         dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3366                  DMA_TO_DEVICE);
3367 
3368     if (state->buf_dma) {
3369         dma_unmap_single(dev, state->buf_dma, state->buflen,
3370                  DMA_TO_DEVICE);
3371         state->buf_dma = 0;
3372     }
3373 }
3374 
3375 static inline void ahash_unmap_ctx(struct device *dev,
3376                    struct ahash_edesc *edesc,
3377                    struct ahash_request *req, u32 flag)
3378 {
3379     struct caam_hash_state *state = ahash_request_ctx(req);
3380 
3381     if (state->ctx_dma) {
3382         dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3383         state->ctx_dma = 0;
3384     }
3385     ahash_unmap(dev, edesc, req);
3386 }
3387 
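     /*
      * Completion callbacks. They differ only in the DMA direction used to
      * unmap the context and in whether the digest is copied from
      * state->caam_ctx to req->result (final/digest paths) or the trailing
      * partial block of req->src is stashed in state->buf (update paths).
      */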
3388 static void ahash_done(void *cbk_ctx, u32 status)
3389 {
3390     struct crypto_async_request *areq = cbk_ctx;
3391     struct ahash_request *req = ahash_request_cast(areq);
3392     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3393     struct caam_hash_state *state = ahash_request_ctx(req);
3394     struct ahash_edesc *edesc = state->caam_req.edesc;
3395     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3396     int digestsize = crypto_ahash_digestsize(ahash);
3397     int ecode = 0;
3398 
3399     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3400 
3401     if (unlikely(status))
3402         ecode = caam_qi2_strstatus(ctx->dev, status);
3403 
3404     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3405     memcpy(req->result, state->caam_ctx, digestsize);
3406     qi_cache_free(edesc);
3407 
3408     print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3409                  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3410                  ctx->ctx_len, 1);
3411 
3412     req->base.complete(&req->base, ecode);
3413 }
3414 
3415 static void ahash_done_bi(void *cbk_ctx, u32 status)
3416 {
3417     struct crypto_async_request *areq = cbk_ctx;
3418     struct ahash_request *req = ahash_request_cast(areq);
3419     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3420     struct caam_hash_state *state = ahash_request_ctx(req);
3421     struct ahash_edesc *edesc = state->caam_req.edesc;
3422     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3423     int ecode = 0;
3424 
3425     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3426 
3427     if (unlikely(status))
3428         ecode = caam_qi2_strstatus(ctx->dev, status);
3429 
3430     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3431     qi_cache_free(edesc);
3432 
3433     scatterwalk_map_and_copy(state->buf, req->src,
3434                  req->nbytes - state->next_buflen,
3435                  state->next_buflen, 0);
3436     state->buflen = state->next_buflen;
3437 
3438     print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3439                  DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3440                  state->buflen, 1);
3441 
3442     print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3443                  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3444                  ctx->ctx_len, 1);
3445     if (req->result)
3446         print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3447                      DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3448                      crypto_ahash_digestsize(ahash), 1);
3449 
3450     req->base.complete(&req->base, ecode);
3451 }
3452 
3453 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3454 {
3455     struct crypto_async_request *areq = cbk_ctx;
3456     struct ahash_request *req = ahash_request_cast(areq);
3457     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3458     struct caam_hash_state *state = ahash_request_ctx(req);
3459     struct ahash_edesc *edesc = state->caam_req.edesc;
3460     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3461     int digestsize = crypto_ahash_digestsize(ahash);
3462     int ecode = 0;
3463 
3464     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3465 
3466     if (unlikely(status))
3467         ecode = caam_qi2_strstatus(ctx->dev, status);
3468 
3469     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3470     memcpy(req->result, state->caam_ctx, digestsize);
3471     qi_cache_free(edesc);
3472 
3473     print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3474                  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3475                  ctx->ctx_len, 1);
3476 
3477     req->base.complete(&req->base, ecode);
3478 }
3479 
3480 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3481 {
3482     struct crypto_async_request *areq = cbk_ctx;
3483     struct ahash_request *req = ahash_request_cast(areq);
3484     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3485     struct caam_hash_state *state = ahash_request_ctx(req);
3486     struct ahash_edesc *edesc = state->caam_req.edesc;
3487     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3488     int ecode = 0;
3489 
3490     dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3491 
3492     if (unlikely(status))
3493         ecode = caam_qi2_strstatus(ctx->dev, status);
3494 
3495     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3496     qi_cache_free(edesc);
3497 
3498     scatterwalk_map_and_copy(state->buf, req->src,
3499                  req->nbytes - state->next_buflen,
3500                  state->next_buflen, 0);
3501     state->buflen = state->next_buflen;
3502 
3503     print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3504                  DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3505                  state->buflen, 1);
3506 
3507     print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3508                  DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3509                  ctx->ctx_len, 1);
3510     if (req->result)
3511         print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3512                      DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3513                      crypto_ahash_digestsize(ahash), 1);
3514 
3515     req->base.complete(&req->base, ecode);
3516 }
3517 
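     /*
      * Hash as many full blocks as possible through the UPDATE flow context.
      * The input frame list entry points to a QM S/G table of [running
      * context, buffered bytes, req->src]; the context is mapped
      * bidirectionally so the engine writes the updated digest back into
      * state->caam_ctx. Bytes that do not fill a block are carried over in
      * state->buf.
      */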
3518 static int ahash_update_ctx(struct ahash_request *req)
3519 {
3520     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3521     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3522     struct caam_hash_state *state = ahash_request_ctx(req);
3523     struct caam_request *req_ctx = &state->caam_req;
3524     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3525     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3526     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3527               GFP_KERNEL : GFP_ATOMIC;
3528     u8 *buf = state->buf;
3529     int *buflen = &state->buflen;
3530     int *next_buflen = &state->next_buflen;
3531     int in_len = *buflen + req->nbytes, to_hash;
3532     int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3533     struct ahash_edesc *edesc;
3534     int ret = 0;
3535 
3536     *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3537     to_hash = in_len - *next_buflen;
3538 
3539     if (to_hash) {
3540         struct dpaa2_sg_entry *sg_table;
3541         int src_len = req->nbytes - *next_buflen;
3542 
3543         src_nents = sg_nents_for_len(req->src, src_len);
3544         if (src_nents < 0) {
3545             dev_err(ctx->dev, "Invalid number of src SG.\n");
3546             return src_nents;
3547         }
3548 
3549         if (src_nents) {
3550             mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3551                           DMA_TO_DEVICE);
3552             if (!mapped_nents) {
3553                 dev_err(ctx->dev, "unable to DMA map source\n");
3554                 return -ENOMEM;
3555             }
3556         } else {
3557             mapped_nents = 0;
3558         }
3559 
3560         /* allocate space for base edesc and link tables */
3561         edesc = qi_cache_zalloc(GFP_DMA | flags);
3562         if (!edesc) {
3563             dma_unmap_sg(ctx->dev, req->src, src_nents,
3564                      DMA_TO_DEVICE);
3565             return -ENOMEM;
3566         }
3567 
3568         edesc->src_nents = src_nents;
3569         qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3570         qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3571                   sizeof(*sg_table);
3572         sg_table = &edesc->sgt[0];
3573 
3574         ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3575                        DMA_BIDIRECTIONAL);
3576         if (ret)
3577             goto unmap_ctx;
3578 
3579         ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3580         if (ret)
3581             goto unmap_ctx;
3582 
3583         if (mapped_nents) {
3584             sg_to_qm_sg_last(req->src, src_len,
3585                      sg_table + qm_sg_src_index, 0);
3586         } else {
3587             dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3588                        true);
3589         }
3590 
3591         edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3592                           qm_sg_bytes, DMA_TO_DEVICE);
3593         if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3594             dev_err(ctx->dev, "unable to map S/G table\n");
3595             ret = -ENOMEM;
3596             goto unmap_ctx;
3597         }
3598         edesc->qm_sg_bytes = qm_sg_bytes;
3599 
3600         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3601         dpaa2_fl_set_final(in_fle, true);
3602         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3603         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3604         dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3605         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3606         dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3607         dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3608 
3609         req_ctx->flc = &ctx->flc[UPDATE];
3610         req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3611         req_ctx->cbk = ahash_done_bi;
3612         req_ctx->ctx = &req->base;
3613         req_ctx->edesc = edesc;
3614 
3615         ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3616         if (ret != -EINPROGRESS &&
3617             !(ret == -EBUSY &&
3618               req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3619             goto unmap_ctx;
3620     } else if (*next_buflen) {
3621         scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3622                      req->nbytes, 0);
3623         *buflen = *next_buflen;
3624 
3625         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3626                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
3627                      *buflen, 1);
3628     }
3629 
3630     return ret;
3631 unmap_ctx:
3632     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3633     qi_cache_free(edesc);
3634     return ret;
3635 }
3636 
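     /*
      * Produce the final digest from the running context plus any buffered
      * bytes, using the FINALIZE flow context.
      */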
3637 static int ahash_final_ctx(struct ahash_request *req)
3638 {
3639     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3640     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3641     struct caam_hash_state *state = ahash_request_ctx(req);
3642     struct caam_request *req_ctx = &state->caam_req;
3643     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3644     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3645     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3646               GFP_KERNEL : GFP_ATOMIC;
3647     int buflen = state->buflen;
3648     int qm_sg_bytes;
3649     int digestsize = crypto_ahash_digestsize(ahash);
3650     struct ahash_edesc *edesc;
3651     struct dpaa2_sg_entry *sg_table;
3652     int ret;
3653 
3654     /* allocate space for base edesc and link tables */
3655     edesc = qi_cache_zalloc(GFP_DMA | flags);
3656     if (!edesc)
3657         return -ENOMEM;
3658 
3659     qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3660     sg_table = &edesc->sgt[0];
3661 
3662     ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3663                    DMA_BIDIRECTIONAL);
3664     if (ret)
3665         goto unmap_ctx;
3666 
3667     ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3668     if (ret)
3669         goto unmap_ctx;
3670 
3671     dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3672 
3673     edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3674                       DMA_TO_DEVICE);
3675     if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3676         dev_err(ctx->dev, "unable to map S/G table\n");
3677         ret = -ENOMEM;
3678         goto unmap_ctx;
3679     }
3680     edesc->qm_sg_bytes = qm_sg_bytes;
3681 
3682     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3683     dpaa2_fl_set_final(in_fle, true);
3684     dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3685     dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3686     dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3687     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3688     dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3689     dpaa2_fl_set_len(out_fle, digestsize);
3690 
3691     req_ctx->flc = &ctx->flc[FINALIZE];
3692     req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3693     req_ctx->cbk = ahash_done_ctx_src;
3694     req_ctx->ctx = &req->base;
3695     req_ctx->edesc = edesc;
3696 
3697     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3698     if (ret == -EINPROGRESS ||
3699         (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3700         return ret;
3701 
3702 unmap_ctx:
3703     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3704     qi_cache_free(edesc);
3705     return ret;
3706 }
3707 
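     /*
      * Like ahash_final_ctx(), but the remaining req->src data is hashed as
      * well: the S/G input is [running context, buffered bytes, req->src].
      */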
3708 static int ahash_finup_ctx(struct ahash_request *req)
3709 {
3710     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3711     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3712     struct caam_hash_state *state = ahash_request_ctx(req);
3713     struct caam_request *req_ctx = &state->caam_req;
3714     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3715     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3716     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3717               GFP_KERNEL : GFP_ATOMIC;
3718     int buflen = state->buflen;
3719     int qm_sg_bytes, qm_sg_src_index;
3720     int src_nents, mapped_nents;
3721     int digestsize = crypto_ahash_digestsize(ahash);
3722     struct ahash_edesc *edesc;
3723     struct dpaa2_sg_entry *sg_table;
3724     int ret;
3725 
3726     src_nents = sg_nents_for_len(req->src, req->nbytes);
3727     if (src_nents < 0) {
3728         dev_err(ctx->dev, "Invalid number of src SG.\n");
3729         return src_nents;
3730     }
3731 
3732     if (src_nents) {
3733         mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3734                       DMA_TO_DEVICE);
3735         if (!mapped_nents) {
3736             dev_err(ctx->dev, "unable to DMA map source\n");
3737             return -ENOMEM;
3738         }
3739     } else {
3740         mapped_nents = 0;
3741     }
3742 
3743     /* allocate space for base edesc and link tables */
3744     edesc = qi_cache_zalloc(GFP_DMA | flags);
3745     if (!edesc) {
3746         dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3747         return -ENOMEM;
3748     }
3749 
3750     edesc->src_nents = src_nents;
3751     qm_sg_src_index = 1 + (buflen ? 1 : 0);
3752     qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3753               sizeof(*sg_table);
3754     sg_table = &edesc->sgt[0];
3755 
3756     ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3757                    DMA_BIDIRECTIONAL);
3758     if (ret)
3759         goto unmap_ctx;
3760 
3761     ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3762     if (ret)
3763         goto unmap_ctx;
3764 
3765     sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3766 
3767     edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3768                       DMA_TO_DEVICE);
3769     if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3770         dev_err(ctx->dev, "unable to map S/G table\n");
3771         ret = -ENOMEM;
3772         goto unmap_ctx;
3773     }
3774     edesc->qm_sg_bytes = qm_sg_bytes;
3775 
3776     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3777     dpaa2_fl_set_final(in_fle, true);
3778     dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3779     dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3780     dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3781     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3782     dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3783     dpaa2_fl_set_len(out_fle, digestsize);
3784 
3785     req_ctx->flc = &ctx->flc[FINALIZE];
3786     req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3787     req_ctx->cbk = ahash_done_ctx_src;
3788     req_ctx->ctx = &req->base;
3789     req_ctx->edesc = edesc;
3790 
3791     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3792     if (ret == -EINPROGRESS ||
3793         (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3794         return ret;
3795 
3796 unmap_ctx:
3797     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3798     qi_cache_free(edesc);
3799     return ret;
3800 }
3801 
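     /*
      * One-shot digest of req->src through the DIGEST flow context. The
      * result lands in state->caam_ctx and is copied to req->result by
      * ahash_done().
      */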
3802 static int ahash_digest(struct ahash_request *req)
3803 {
3804     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3805     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3806     struct caam_hash_state *state = ahash_request_ctx(req);
3807     struct caam_request *req_ctx = &state->caam_req;
3808     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3809     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3810     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3811               GFP_KERNEL : GFP_ATOMIC;
3812     int digestsize = crypto_ahash_digestsize(ahash);
3813     int src_nents, mapped_nents;
3814     struct ahash_edesc *edesc;
3815     int ret = -ENOMEM;
3816 
3817     state->buf_dma = 0;
3818 
3819     src_nents = sg_nents_for_len(req->src, req->nbytes);
3820     if (src_nents < 0) {
3821         dev_err(ctx->dev, "Invalid number of src SG.\n");
3822         return src_nents;
3823     }
3824 
3825     if (src_nents) {
3826         mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3827                       DMA_TO_DEVICE);
3828         if (!mapped_nents) {
3829             dev_err(ctx->dev, "unable to map source for DMA\n");
3830             return ret;
3831         }
3832     } else {
3833         mapped_nents = 0;
3834     }
3835 
3836     /* allocate space for base edesc and link tables */
3837     edesc = qi_cache_zalloc(GFP_DMA | flags);
3838     if (!edesc) {
3839         dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3840         return ret;
3841     }
3842 
3843     edesc->src_nents = src_nents;
3844     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3845 
3846     if (mapped_nents > 1) {
3847         int qm_sg_bytes;
3848         struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3849 
3850         qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3851         sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3852         edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3853                           qm_sg_bytes, DMA_TO_DEVICE);
3854         if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3855             dev_err(ctx->dev, "unable to map S/G table\n");
3856             goto unmap;
3857         }
3858         edesc->qm_sg_bytes = qm_sg_bytes;
3859         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3860         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3861     } else {
3862         dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3863         dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3864     }
3865 
3866     state->ctx_dma_len = digestsize;
3867     state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3868                     DMA_FROM_DEVICE);
3869     if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3870         dev_err(ctx->dev, "unable to map ctx\n");
3871         state->ctx_dma = 0;
3872         goto unmap;
3873     }
3874 
3875     dpaa2_fl_set_final(in_fle, true);
3876     dpaa2_fl_set_len(in_fle, req->nbytes);
3877     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3878     dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3879     dpaa2_fl_set_len(out_fle, digestsize);
3880 
3881     req_ctx->flc = &ctx->flc[DIGEST];
3882     req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3883     req_ctx->cbk = ahash_done;
3884     req_ctx->ctx = &req->base;
3885     req_ctx->edesc = edesc;
3886     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3887     if (ret == -EINPROGRESS ||
3888         (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3889         return ret;
3890 
3891 unmap:
3892     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3893     qi_cache_free(edesc);
3894     return ret;
3895 }
3896 
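     /*
      * Final with no running context: only the bytes buffered in state->buf
      * (possibly none) are digested through the DIGEST flow context.
      */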
3897 static int ahash_final_no_ctx(struct ahash_request *req)
3898 {
3899     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3900     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3901     struct caam_hash_state *state = ahash_request_ctx(req);
3902     struct caam_request *req_ctx = &state->caam_req;
3903     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3904     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3905     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3906               GFP_KERNEL : GFP_ATOMIC;
3907     u8 *buf = state->buf;
3908     int buflen = state->buflen;
3909     int digestsize = crypto_ahash_digestsize(ahash);
3910     struct ahash_edesc *edesc;
3911     int ret = -ENOMEM;
3912 
3913     /* allocate space for base edesc and link tables */
3914     edesc = qi_cache_zalloc(GFP_DMA | flags);
3915     if (!edesc)
3916         return ret;
3917 
3918     if (buflen) {
3919         state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3920                         DMA_TO_DEVICE);
3921         if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3922             dev_err(ctx->dev, "unable to map src\n");
3923             goto unmap;
3924         }
3925     }
3926 
3927     state->ctx_dma_len = digestsize;
3928     state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3929                     DMA_FROM_DEVICE);
3930     if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3931         dev_err(ctx->dev, "unable to map ctx\n");
3932         state->ctx_dma = 0;
3933         goto unmap;
3934     }
3935 
3936     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3937     dpaa2_fl_set_final(in_fle, true);
3938     /*
3939      * crypto engine requires the input entry to be present when
3940      * "frame list" FD is used.
3941      * Since engine does not support FMT=2'b11 (unused entry type), leaving
3942      * in_fle zeroized (except for "Final" flag) is the best option.
3943      */
3944     if (buflen) {
3945         dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3946         dpaa2_fl_set_addr(in_fle, state->buf_dma);
3947         dpaa2_fl_set_len(in_fle, buflen);
3948     }
3949     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3950     dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3951     dpaa2_fl_set_len(out_fle, digestsize);
3952 
3953     req_ctx->flc = &ctx->flc[DIGEST];
3954     req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3955     req_ctx->cbk = ahash_done;
3956     req_ctx->ctx = &req->base;
3957     req_ctx->edesc = edesc;
3958 
3959     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3960     if (ret == -EINPROGRESS ||
3961         (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3962         return ret;
3963 
3964 unmap:
3965     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3966     qi_cache_free(edesc);
3967     return ret;
3968 }
3969 
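     /*
      * Update when no running context exists yet: [buffered bytes, req->src]
      * are hashed through UPDATE_FIRST, which initializes the context in
      * state->caam_ctx; subsequent operations are routed to the *_ctx
      * handlers.
      */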
3970 static int ahash_update_no_ctx(struct ahash_request *req)
3971 {
3972     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3973     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3974     struct caam_hash_state *state = ahash_request_ctx(req);
3975     struct caam_request *req_ctx = &state->caam_req;
3976     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3977     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3978     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3979               GFP_KERNEL : GFP_ATOMIC;
3980     u8 *buf = state->buf;
3981     int *buflen = &state->buflen;
3982     int *next_buflen = &state->next_buflen;
3983     int in_len = *buflen + req->nbytes, to_hash;
3984     int qm_sg_bytes, src_nents, mapped_nents;
3985     struct ahash_edesc *edesc;
3986     int ret = 0;
3987 
3988     *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3989     to_hash = in_len - *next_buflen;
3990 
3991     if (to_hash) {
3992         struct dpaa2_sg_entry *sg_table;
3993         int src_len = req->nbytes - *next_buflen;
3994 
3995         src_nents = sg_nents_for_len(req->src, src_len);
3996         if (src_nents < 0) {
3997             dev_err(ctx->dev, "Invalid number of src SG.\n");
3998             return src_nents;
3999         }
4000 
4001         if (src_nents) {
4002             mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4003                           DMA_TO_DEVICE);
4004             if (!mapped_nents) {
4005                 dev_err(ctx->dev, "unable to DMA map source\n");
4006                 return -ENOMEM;
4007             }
4008         } else {
4009             mapped_nents = 0;
4010         }
4011 
4012         /* allocate space for base edesc and link tables */
4013         edesc = qi_cache_zalloc(GFP_DMA | flags);
4014         if (!edesc) {
4015             dma_unmap_sg(ctx->dev, req->src, src_nents,
4016                      DMA_TO_DEVICE);
4017             return -ENOMEM;
4018         }
4019 
4020         edesc->src_nents = src_nents;
4021         qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4022                   sizeof(*sg_table);
4023         sg_table = &edesc->sgt[0];
4024 
4025         ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4026         if (ret)
4027             goto unmap_ctx;
4028 
4029         sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4030 
4031         edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4032                           qm_sg_bytes, DMA_TO_DEVICE);
4033         if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4034             dev_err(ctx->dev, "unable to map S/G table\n");
4035             ret = -ENOMEM;
4036             goto unmap_ctx;
4037         }
4038         edesc->qm_sg_bytes = qm_sg_bytes;
4039 
4040         state->ctx_dma_len = ctx->ctx_len;
4041         state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4042                         ctx->ctx_len, DMA_FROM_DEVICE);
4043         if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4044             dev_err(ctx->dev, "unable to map ctx\n");
4045             state->ctx_dma = 0;
4046             ret = -ENOMEM;
4047             goto unmap_ctx;
4048         }
4049 
4050         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4051         dpaa2_fl_set_final(in_fle, true);
4052         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4053         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4054         dpaa2_fl_set_len(in_fle, to_hash);
4055         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4056         dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4057         dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4058 
4059         req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4060         req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4061         req_ctx->cbk = ahash_done_ctx_dst;
4062         req_ctx->ctx = &req->base;
4063         req_ctx->edesc = edesc;
4064 
4065         ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4066         if (ret != -EINPROGRESS &&
4067             !(ret == -EBUSY &&
4068               req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4069             goto unmap_ctx;
4070 
4071         state->update = ahash_update_ctx;
4072         state->finup = ahash_finup_ctx;
4073         state->final = ahash_final_ctx;
4074     } else if (*next_buflen) {
4075         scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4076                      req->nbytes, 0);
4077         *buflen = *next_buflen;
4078 
4079         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4080                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
4081                      *buflen, 1);
4082     }
4083 
4084     return ret;
4085 unmap_ctx:
4086     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4087     qi_cache_free(edesc);
4088     return ret;
4089 }
4090 
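     /*
      * Finup with no running context: [buffered bytes, req->src] are hashed
      * in a single pass through the DIGEST flow context.
      */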
4091 static int ahash_finup_no_ctx(struct ahash_request *req)
4092 {
4093     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4094     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4095     struct caam_hash_state *state = ahash_request_ctx(req);
4096     struct caam_request *req_ctx = &state->caam_req;
4097     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4098     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4099     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4100               GFP_KERNEL : GFP_ATOMIC;
4101     int buflen = state->buflen;
4102     int qm_sg_bytes, src_nents, mapped_nents;
4103     int digestsize = crypto_ahash_digestsize(ahash);
4104     struct ahash_edesc *edesc;
4105     struct dpaa2_sg_entry *sg_table;
4106     int ret = -ENOMEM;
4107 
4108     src_nents = sg_nents_for_len(req->src, req->nbytes);
4109     if (src_nents < 0) {
4110         dev_err(ctx->dev, "Invalid number of src SG.\n");
4111         return src_nents;
4112     }
4113 
4114     if (src_nents) {
4115         mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4116                       DMA_TO_DEVICE);
4117         if (!mapped_nents) {
4118             dev_err(ctx->dev, "unable to DMA map source\n");
4119             return ret;
4120         }
4121     } else {
4122         mapped_nents = 0;
4123     }
4124 
4125     /* allocate space for base edesc and link tables */
4126     edesc = qi_cache_zalloc(GFP_DMA | flags);
4127     if (!edesc) {
4128         dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4129         return ret;
4130     }
4131 
4132     edesc->src_nents = src_nents;
4133     qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4134     sg_table = &edesc->sgt[0];
4135 
4136     ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4137     if (ret)
4138         goto unmap;
4139 
4140     sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4141 
4142     edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4143                       DMA_TO_DEVICE);
4144     if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4145         dev_err(ctx->dev, "unable to map S/G table\n");
4146         ret = -ENOMEM;
4147         goto unmap;
4148     }
4149     edesc->qm_sg_bytes = qm_sg_bytes;
4150 
4151     state->ctx_dma_len = digestsize;
4152     state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4153                     DMA_FROM_DEVICE);
4154     if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4155         dev_err(ctx->dev, "unable to map ctx\n");
4156         state->ctx_dma = 0;
4157         ret = -ENOMEM;
4158         goto unmap;
4159     }
4160 
4161     memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4162     dpaa2_fl_set_final(in_fle, true);
4163     dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4164     dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4165     dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4166     dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4167     dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4168     dpaa2_fl_set_len(out_fle, digestsize);
4169 
4170     req_ctx->flc = &ctx->flc[DIGEST];
4171     req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4172     req_ctx->cbk = ahash_done;
4173     req_ctx->ctx = &req->base;
4174     req_ctx->edesc = edesc;
4175     ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4176     if (ret != -EINPROGRESS &&
4177         !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4178         goto unmap;
4179 
4180     return ret;
4181 unmap:
4182     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4183     qi_cache_free(edesc);
4184     return ret;
4185 }
4186 
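     /*
      * First ->update() of a request: full blocks are hashed through
      * UPDATE_FIRST to seed the running context and the *_ctx handlers are
      * installed; if the data fits entirely in state->buf it is only
      * buffered and the no-context handlers are installed instead.
      */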
4187 static int ahash_update_first(struct ahash_request *req)
4188 {
4189     struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4190     struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4191     struct caam_hash_state *state = ahash_request_ctx(req);
4192     struct caam_request *req_ctx = &state->caam_req;
4193     struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4194     struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4195     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4196               GFP_KERNEL : GFP_ATOMIC;
4197     u8 *buf = state->buf;
4198     int *buflen = &state->buflen;
4199     int *next_buflen = &state->next_buflen;
4200     int to_hash;
4201     int src_nents, mapped_nents;
4202     struct ahash_edesc *edesc;
4203     int ret = 0;
4204 
4205     *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4206                       1);
4207     to_hash = req->nbytes - *next_buflen;
4208 
4209     if (to_hash) {
4210         struct dpaa2_sg_entry *sg_table;
4211         int src_len = req->nbytes - *next_buflen;
4212 
4213         src_nents = sg_nents_for_len(req->src, src_len);
4214         if (src_nents < 0) {
4215             dev_err(ctx->dev, "Invalid number of src SG.\n");
4216             return src_nents;
4217         }
4218 
4219         if (src_nents) {
4220             mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4221                           DMA_TO_DEVICE);
4222             if (!mapped_nents) {
4223                 dev_err(ctx->dev, "unable to map source for DMA\n");
4224                 return -ENOMEM;
4225             }
4226         } else {
4227             mapped_nents = 0;
4228         }
4229 
4230         /* allocate space for base edesc and link tables */
4231         edesc = qi_cache_zalloc(GFP_DMA | flags);
4232         if (!edesc) {
4233             dma_unmap_sg(ctx->dev, req->src, src_nents,
4234                      DMA_TO_DEVICE);
4235             return -ENOMEM;
4236         }
4237 
4238         edesc->src_nents = src_nents;
4239         sg_table = &edesc->sgt[0];
4240 
4241         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4242         dpaa2_fl_set_final(in_fle, true);
4243         dpaa2_fl_set_len(in_fle, to_hash);
4244 
4245         if (mapped_nents > 1) {
4246             int qm_sg_bytes;
4247 
4248             sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4249             qm_sg_bytes = pad_sg_nents(mapped_nents) *
4250                       sizeof(*sg_table);
4251             edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4252                               qm_sg_bytes,
4253                               DMA_TO_DEVICE);
4254             if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4255                 dev_err(ctx->dev, "unable to map S/G table\n");
4256                 ret = -ENOMEM;
4257                 goto unmap_ctx;
4258             }
4259             edesc->qm_sg_bytes = qm_sg_bytes;
4260             dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4261             dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4262         } else {
4263             dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4264             dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4265         }
4266 
4267         state->ctx_dma_len = ctx->ctx_len;
4268         state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4269                         ctx->ctx_len, DMA_FROM_DEVICE);
4270         if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4271             dev_err(ctx->dev, "unable to map ctx\n");
4272             state->ctx_dma = 0;
4273             ret = -ENOMEM;
4274             goto unmap_ctx;
4275         }
4276 
4277         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4278         dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4279         dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4280 
4281         req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4282         req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4283         req_ctx->cbk = ahash_done_ctx_dst;
4284         req_ctx->ctx = &req->base;
4285         req_ctx->edesc = edesc;
4286 
4287         ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4288         if (ret != -EINPROGRESS &&
4289             !(ret == -EBUSY && req->base.flags &
4290               CRYPTO_TFM_REQ_MAY_BACKLOG))
4291             goto unmap_ctx;
4292 
4293         state->update = ahash_update_ctx;
4294         state->finup = ahash_finup_ctx;
4295         state->final = ahash_final_ctx;
4296     } else if (*next_buflen) {
4297         state->update = ahash_update_no_ctx;
4298         state->finup = ahash_finup_no_ctx;
4299         state->final = ahash_final_no_ctx;
4300         scatterwalk_map_and_copy(buf, req->src, 0,
4301                      req->nbytes, 0);
4302         *buflen = *next_buflen;
4303 
4304         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4305                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
4306                      *buflen, 1);
4307     }
4308 
4309     return ret;
4310 unmap_ctx:
4311     ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4312     qi_cache_free(edesc);
4313     return ret;
4314 }
4315 
4316 static int ahash_finup_first(struct ahash_request *req)
4317 {
4318     return ahash_digest(req);
4319 }
4320 
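     /*
      * Reset the per-request state and install the first-pass handlers
      * (ahash_update_first / ahash_finup_first / ahash_final_no_ctx).
      */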
4321 static int ahash_init(struct ahash_request *req)
4322 {
4323     struct caam_hash_state *state = ahash_request_ctx(req);
4324 
4325     state->update = ahash_update_first;
4326     state->finup = ahash_finup_first;
4327     state->final = ahash_final_no_ctx;
4328 
4329     state->ctx_dma = 0;
4330     state->ctx_dma_len = 0;
4331     state->buf_dma = 0;
4332     state->buflen = 0;
4333     state->next_buflen = 0;
4334 
4335     return 0;
4336 }
4337 
4338 static int ahash_update(struct ahash_request *req)
4339 {
4340     struct caam_hash_state *state = ahash_request_ctx(req);
4341 
4342     return state->update(req);
4343 }
4344 
4345 static int ahash_finup(struct ahash_request *req)
4346 {
4347     struct caam_hash_state *state = ahash_request_ctx(req);
4348 
4349     return state->finup(req);
4350 }
4351 
4352 static int ahash_final(struct ahash_request *req)
4353 {
4354     struct caam_hash_state *state = ahash_request_ctx(req);
4355 
4356     return state->final(req);
4357 }
4358 
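     /*
      * Export/import the software-visible hash state (buffered bytes, CAAM
      * context and the currently selected handlers) so a partially hashed
      * request can be suspended and later resumed.
      */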
4359 static int ahash_export(struct ahash_request *req, void *out)
4360 {
4361     struct caam_hash_state *state = ahash_request_ctx(req);
4362     struct caam_export_state *export = out;
4363     u8 *buf = state->buf;
4364     int len = state->buflen;
4365 
4366     memcpy(export->buf, buf, len);
4367     memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4368     export->buflen = len;
4369     export->update = state->update;
4370     export->final = state->final;
4371     export->finup = state->finup;
4372 
4373     return 0;
4374 }
4375 
4376 static int ahash_import(struct ahash_request *req, const void *in)
4377 {
4378     struct caam_hash_state *state = ahash_request_ctx(req);
4379     const struct caam_export_state *export = in;
4380 
4381     memset(state, 0, sizeof(*state));
4382     memcpy(state->buf, export->buf, export->buflen);
4383     memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4384     state->buflen = export->buflen;
4385     state->update = export->update;
4386     state->final = export->final;
4387     state->finup = export->finup;
4388 
4389     return 0;
4390 }
4391 
4392 struct caam_hash_template {
4393     char name[CRYPTO_MAX_ALG_NAME];
4394     char driver_name[CRYPTO_MAX_ALG_NAME];
4395     char hmac_name[CRYPTO_MAX_ALG_NAME];
4396     char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4397     unsigned int blocksize;
4398     struct ahash_alg template_ahash;
4399     u32 alg_type;
4400 };
4401 
4402 /* ahash algorithm templates */
4403 static struct caam_hash_template driver_hash[] = {
4404     {
4405         .name = "sha1",
4406         .driver_name = "sha1-caam-qi2",
4407         .hmac_name = "hmac(sha1)",
4408         .hmac_driver_name = "hmac-sha1-caam-qi2",
4409         .blocksize = SHA1_BLOCK_SIZE,
4410         .template_ahash = {
4411             .init = ahash_init,
4412             .update = ahash_update,
4413             .final = ahash_final,
4414             .finup = ahash_finup,
4415             .digest = ahash_digest,
4416             .export = ahash_export,
4417             .import = ahash_import,
4418             .setkey = ahash_setkey,
4419             .halg = {
4420                 .digestsize = SHA1_DIGEST_SIZE,
4421                 .statesize = sizeof(struct caam_export_state),
4422             },
4423         },
4424         .alg_type = OP_ALG_ALGSEL_SHA1,
4425     }, {
4426         .name = "sha224",
4427         .driver_name = "sha224-caam-qi2",
4428         .hmac_name = "hmac(sha224)",
4429         .hmac_driver_name = "hmac-sha224-caam-qi2",
4430         .blocksize = SHA224_BLOCK_SIZE,
4431         .template_ahash = {
4432             .init = ahash_init,
4433             .update = ahash_update,
4434             .final = ahash_final,
4435             .finup = ahash_finup,
4436             .digest = ahash_digest,
4437             .export = ahash_export,
4438             .import = ahash_import,
4439             .setkey = ahash_setkey,
4440             .halg = {
4441                 .digestsize = SHA224_DIGEST_SIZE,
4442                 .statesize = sizeof(struct caam_export_state),
4443             },
4444         },
4445         .alg_type = OP_ALG_ALGSEL_SHA224,
4446     }, {
4447         .name = "sha256",
4448         .driver_name = "sha256-caam-qi2",
4449         .hmac_name = "hmac(sha256)",
4450         .hmac_driver_name = "hmac-sha256-caam-qi2",
4451         .blocksize = SHA256_BLOCK_SIZE,
4452         .template_ahash = {
4453             .init = ahash_init,
4454             .update = ahash_update,
4455             .final = ahash_final,
4456             .finup = ahash_finup,
4457             .digest = ahash_digest,
4458             .export = ahash_export,
4459             .import = ahash_import,
4460             .setkey = ahash_setkey,
4461             .halg = {
4462                 .digestsize = SHA256_DIGEST_SIZE,
4463                 .statesize = sizeof(struct caam_export_state),
4464             },
4465         },
4466         .alg_type = OP_ALG_ALGSEL_SHA256,
4467     }, {
4468         .name = "sha384",
4469         .driver_name = "sha384-caam-qi2",
4470         .hmac_name = "hmac(sha384)",
4471         .hmac_driver_name = "hmac-sha384-caam-qi2",
4472         .blocksize = SHA384_BLOCK_SIZE,
4473         .template_ahash = {
4474             .init = ahash_init,
4475             .update = ahash_update,
4476             .final = ahash_final,
4477             .finup = ahash_finup,
4478             .digest = ahash_digest,
4479             .export = ahash_export,
4480             .import = ahash_import,
4481             .setkey = ahash_setkey,
4482             .halg = {
4483                 .digestsize = SHA384_DIGEST_SIZE,
4484                 .statesize = sizeof(struct caam_export_state),
4485             },
4486         },
4487         .alg_type = OP_ALG_ALGSEL_SHA384,
4488     }, {
4489         .name = "sha512",
4490         .driver_name = "sha512-caam-qi2",
4491         .hmac_name = "hmac(sha512)",
4492         .hmac_driver_name = "hmac-sha512-caam-qi2",
4493         .blocksize = SHA512_BLOCK_SIZE,
4494         .template_ahash = {
4495             .init = ahash_init,
4496             .update = ahash_update,
4497             .final = ahash_final,
4498             .finup = ahash_finup,
4499             .digest = ahash_digest,
4500             .export = ahash_export,
4501             .import = ahash_import,
4502             .setkey = ahash_setkey,
4503             .halg = {
4504                 .digestsize = SHA512_DIGEST_SIZE,
4505                 .statesize = sizeof(struct caam_export_state),
4506             },
4507         },
4508         .alg_type = OP_ALG_ALGSEL_SHA512,
4509     }, {
4510         .name = "md5",
4511         .driver_name = "md5-caam-qi2",
4512         .hmac_name = "hmac(md5)",
4513         .hmac_driver_name = "hmac-md5-caam-qi2",
4514         .blocksize = MD5_BLOCK_WORDS * 4,
4515         .template_ahash = {
4516             .init = ahash_init,
4517             .update = ahash_update,
4518             .final = ahash_final,
4519             .finup = ahash_finup,
4520             .digest = ahash_digest,
4521             .export = ahash_export,
4522             .import = ahash_import,
4523             .setkey = ahash_setkey,
4524             .halg = {
4525                 .digestsize = MD5_DIGEST_SIZE,
4526                 .statesize = sizeof(struct caam_export_state),
4527             },
4528         },
4529         .alg_type = OP_ALG_ALGSEL_MD5,
4530     }
4531 };
4532 
4533 struct caam_hash_alg {
4534     struct list_head entry;
4535     struct device *dev;
4536     int alg_type;
4537     struct ahash_alg ahash_alg;
4538 };
4539 
4540 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4541 {
4542     struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4543     struct crypto_alg *base = tfm->__crt_alg;
4544     struct hash_alg_common *halg =
4545          container_of(base, struct hash_alg_common, base);
4546     struct ahash_alg *alg =
4547          container_of(halg, struct ahash_alg, halg);
4548     struct caam_hash_alg *caam_hash =
4549          container_of(alg, struct caam_hash_alg, ahash_alg);
4550     struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4551     /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
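         /*
          * SHA-224 and SHA-384 carry the parent algorithm's running state
          * (SHA-256 / SHA-512 sized), hence the bare 32 and 64 below
          */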
4552     static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4553                      HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4554                      HASH_MSG_LEN + 32,
4555                      HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4556                      HASH_MSG_LEN + 64,
4557                      HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4558     dma_addr_t dma_addr;
4559     int i;
4560 
4561     ctx->dev = caam_hash->dev;
4562 
4563     if (alg->setkey) {
4564         ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4565                               ARRAY_SIZE(ctx->key),
4566                               DMA_TO_DEVICE,
4567                               DMA_ATTR_SKIP_CPU_SYNC);
4568         if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4569             dev_err(ctx->dev, "unable to map key\n");
4570             return -ENOMEM;
4571         }
4572     }
4573 
4574     dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4575                     DMA_BIDIRECTIONAL,
4576                     DMA_ATTR_SKIP_CPU_SYNC);
4577     if (dma_mapping_error(ctx->dev, dma_addr)) {
4578         dev_err(ctx->dev, "unable to map shared descriptors\n");
4579         if (ctx->adata.key_dma)
4580             dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4581                            ARRAY_SIZE(ctx->key),
4582                            DMA_TO_DEVICE,
4583                            DMA_ATTR_SKIP_CPU_SYNC);
4584         return -ENOMEM;
4585     }
4586 
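         /* flc[] is contiguous - derive each op's DMA address from the single mapping */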
4587     for (i = 0; i < HASH_NUM_OP; i++)
4588         ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4589 
4590     /* copy descriptor header template value */
4591     ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4592 
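         /* the ALGSEL subfield enumerates MD5..SHA512 in the same order as runninglen[] */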
4593     ctx->ctx_len = runninglen[(ctx->adata.algtype &
4594                    OP_ALG_ALGSEL_SUBMASK) >>
4595                   OP_ALG_ALGSEL_SHIFT];
4596 
4597     crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4598                  sizeof(struct caam_hash_state));
4599 
4600     /*
4601      * For keyed hash algorithms, shared descriptors
4602      * are created later, in the setkey() callback
4603      */
4604     return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4605 }
4606 
4607 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4608 {
4609     struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4610 
4611     dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4612                    DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4613     if (ctx->adata.key_dma)
4614         dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4615                        ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4616                        DMA_ATTR_SKIP_CPU_SYNC);
4617 }
4618 
4619 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4620     struct caam_hash_template *template, bool keyed)
4621 {
4622     struct caam_hash_alg *t_alg;
4623     struct ahash_alg *halg;
4624     struct crypto_alg *alg;
4625 
4626     t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4627     if (!t_alg)
4628         return ERR_PTR(-ENOMEM);
4629 
4630     t_alg->ahash_alg = template->template_ahash;
4631     halg = &t_alg->ahash_alg;
4632     alg = &halg->halg.base;
4633 
4634     if (keyed) {
4635         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4636              template->hmac_name);
4637         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4638              template->hmac_driver_name);
4639     } else {
4640         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4641              template->name);
4642         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4643              template->driver_name);
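             /* unkeyed variants have no setkey; cra_init keys off this to build descriptors up front */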
4644         t_alg->ahash_alg.setkey = NULL;
4645     }
4646     alg->cra_module = THIS_MODULE;
4647     alg->cra_init = caam_hash_cra_init;
4648     alg->cra_exit = caam_hash_cra_exit;
4649     alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4650     alg->cra_priority = CAAM_CRA_PRIORITY;
4651     alg->cra_blocksize = template->blocksize;
4652     alg->cra_alignmask = 0;
4653     alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4654 
4655     t_alg->alg_type = template->alg_type;
4656     t_alg->dev = dev;
4657 
4658     return t_alg;
4659 }
4660 
4661 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4662 {
4663     struct dpaa2_caam_priv_per_cpu *ppriv;
4664 
4665     ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4666     napi_schedule_irqoff(&ppriv->napi);
4667 }
4668 
4669 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4670 {
4671     struct device *dev = priv->dev;
4672     struct dpaa2_io_notification_ctx *nctx;
4673     struct dpaa2_caam_priv_per_cpu *ppriv;
4674     int err, i = 0, cpu;
4675 
4676     for_each_online_cpu(cpu) {
4677         ppriv = per_cpu_ptr(priv->ppriv, cpu);
4678         ppriv->priv = priv;
4679         nctx = &ppriv->nctx;
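             /* notifications come per frame queue (FQDAN), not per channel (CDAN) */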
4680         nctx->is_cdan = 0;
4681         nctx->id = ppriv->rsp_fqid;
4682         nctx->desired_cpu = cpu;
4683         nctx->cb = dpaa2_caam_fqdan_cb;
4684 
4685         /* Register notification callbacks */
4686         ppriv->dpio = dpaa2_io_service_select(cpu);
4687         err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4688         if (unlikely(err)) {
4689             dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4690             nctx->cb = NULL;
4691             /*
4692              * If there is no affine DPIO for this core, there's probably
4693              * none available for the next cores either. Signal that we
4694              * want to retry later, in case the DPIO devices weren't
4695              * probed yet.
4696              */
4697             err = -EPROBE_DEFER;
4698             goto err;
4699         }
4700 
4701         ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4702                              dev);
4703         if (unlikely(!ppriv->store)) {
4704             dev_err(dev, "dpaa2_io_store_create() failed\n");
4705             err = -ENOMEM;
4706             goto err;
4707         }
4708 
4709         if (++i == priv->num_pairs)
4710             break;
4711     }
4712 
4713     return 0;
4714 
4715 err:
4716     for_each_online_cpu(cpu) {
4717         ppriv = per_cpu_ptr(priv->ppriv, cpu);
4718         if (!ppriv->nctx.cb)
4719             break;
4720         dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4721     }
4722 
4723     for_each_online_cpu(cpu) {
4724         ppriv = per_cpu_ptr(priv->ppriv, cpu);
4725         if (!ppriv->store)
4726             break;
4727         dpaa2_io_store_destroy(ppriv->store);
4728     }
4729 
4730     return err;
4731 }
4732 
4733 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4734 {
4735     struct dpaa2_caam_priv_per_cpu *ppriv;
4736     int i = 0, cpu;
4737 
4738     for_each_online_cpu(cpu) {
4739         ppriv = per_cpu_ptr(priv->ppriv, cpu);
4740         dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4741                         priv->dev);
4742         dpaa2_io_store_destroy(ppriv->store);
4743 
4744         if (++i == priv->num_pairs)
4745             return;
4746     }
4747 }
4748 
4749 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4750 {
4751     struct dpseci_rx_queue_cfg rx_queue_cfg;
4752     struct device *dev = priv->dev;
4753     struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4754     struct dpaa2_caam_priv_per_cpu *ppriv;
4755     int err = 0, i = 0, cpu;
4756 
4757     /* Configure Rx queues */
4758     for_each_online_cpu(cpu) {
4759         ppriv = per_cpu_ptr(priv->ppriv, cpu);
4760 
4761         rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4762                        DPSECI_QUEUE_OPT_USER_CTX;
4763         rx_queue_cfg.order_preservation_en = 0;
4764         rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4765         rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4766         /*
4767          * Rx priority (WQ) doesn't really matter, since we use
4768          * pull mode, i.e. volatile dequeues from specific FQs
4769          */
4770         rx_queue_cfg.dest_cfg.priority = 0;
4771         rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4772 
4773         err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4774                       &rx_queue_cfg);
4775         if (err) {
4776             dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4777                 err);
4778             return err;
4779         }
4780 
4781         if (++i == priv->num_pairs)
4782             break;
4783     }
4784 
4785     return err;
4786 }
4787 
4788 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4789 {
4790     struct device *dev = priv->dev;
4791 
4792     if (!priv->cscn_mem)
4793         return;
4794 
4795     dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4796     kfree(priv->cscn_mem);
4797 }
4798 
4799 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4800 {
4801     struct device *dev = priv->dev;
4802     struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4803     int err;
4804 
4805     if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4806         err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4807         if (err)
4808             dev_err(dev, "dpseci_reset() failed\n");
4809     }
4810 
4811     dpaa2_dpseci_congestion_free(priv);
4812     dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4813 }
4814 
4815 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4816                   const struct dpaa2_fd *fd)
4817 {
4818     struct caam_request *req;
4819     u32 fd_err;
4820 
4821     if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4822         dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4823         return;
4824     }
4825 
4826     fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4827     if (unlikely(fd_err))
4828         dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4829 
4830     /*
4831      * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4832      * in FD[ERR] or FD[FRC].
4833      */
4834     req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4835     dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4836              DMA_BIDIRECTIONAL);
4837     req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4838 }
4839 
4840 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4841 {
4842     int err;
4843 
4844     /* Retry while portal is busy */
4845     do {
4846         err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4847                            ppriv->store);
4848     } while (err == -EBUSY);
4849 
4850     if (unlikely(err))
4851         dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4852 
4853     return err;
4854 }
4855 
4856 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4857 {
4858     struct dpaa2_dq *dq;
4859     int cleaned = 0, is_last;
4860 
4861     do {
4862         dq = dpaa2_io_store_next(ppriv->store, &is_last);
4863         if (unlikely(!dq)) {
4864             if (unlikely(!is_last)) {
4865                 dev_dbg(ppriv->priv->dev,
4866                     "FQ %d returned no valid frames\n",
4867                     ppriv->rsp_fqid);
4868                 /*
4869                  * MUST retry until we get some sort of
4870                  * valid response token (be it "empty dequeue"
4871                  * or a valid frame).
4872                  */
4873                 continue;
4874             }
4875             break;
4876         }
4877 
4878         /* Process FD */
4879         dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4880         cleaned++;
4881     } while (!is_last);
4882 
4883     return cleaned;
4884 }
4885 
4886 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4887 {
4888     struct dpaa2_caam_priv_per_cpu *ppriv;
4889     struct dpaa2_caam_priv *priv;
4890     int err, cleaned = 0, store_cleaned;
4891 
4892     ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4893     priv = ppriv->priv;
4894 
4895     if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4896         return 0;
4897 
4898     do {
4899         store_cleaned = dpaa2_caam_store_consume(ppriv);
4900         cleaned += store_cleaned;
4901 
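             /* stop when the FQ is drained or another full store could exceed the budget */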
4902         if (store_cleaned == 0 ||
4903             cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4904             break;
4905 
4906         /* Try to dequeue some more */
4907         err = dpaa2_caam_pull_fq(ppriv);
4908         if (unlikely(err))
4909             break;
4910     } while (1);
4911 
4912     if (cleaned < budget) {
4913         napi_complete_done(napi, cleaned);
4914         err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4915         if (unlikely(err))
4916             dev_err(priv->dev, "Notification rearm failed: %d\n",
4917                 err);
4918     }
4919 
4920     return cleaned;
4921 }
4922 
4923 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4924                      u16 token)
4925 {
4926     struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4927     struct device *dev = priv->dev;
4928     int err;
4929 
4930     /*
4931      * The congestion group feature is supported starting with DPSECI API
4932      * v5.1, and only when the object has been created with this capability.
4933      */
4934     if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4935         !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4936         return 0;
4937 
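         /* over-allocate so the CSCN area can be aligned to DPAA2_CSCN_ALIGN below */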
4938     priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4939                  GFP_KERNEL | GFP_DMA);
4940     if (!priv->cscn_mem)
4941         return -ENOMEM;
4942 
4943     priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4944     priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4945                     DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4946     if (dma_mapping_error(dev, priv->cscn_dma)) {
4947         dev_err(dev, "Error mapping CSCN memory area\n");
4948         err = -ENOMEM;
4949         goto err_dma_map;
4950     }
4951 
4952     cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4953     cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4954     cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4955     cong_notif_cfg.message_ctx = (uintptr_t)priv;
4956     cong_notif_cfg.message_iova = priv->cscn_dma;
4957     cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4958                     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4959                     DPSECI_CGN_MODE_COHERENT_WRITE;
4960 
4961     err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4962                          &cong_notif_cfg);
4963     if (err) {
4964         dev_err(dev, "dpseci_set_congestion_notification failed\n");
4965         goto err_set_cong;
4966     }
4967 
4968     return 0;
4969 
4970 err_set_cong:
4971     dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4972 err_dma_map:
4973     kfree(priv->cscn_mem);
4974 
4975     return err;
4976 }
4977 
4978 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4979 {
4980     struct device *dev = &ls_dev->dev;
4981     struct dpaa2_caam_priv *priv;
4982     struct dpaa2_caam_priv_per_cpu *ppriv;
4983     int err, cpu;
4984     u8 i;
4985 
4986     priv = dev_get_drvdata(dev);
4987 
4988     priv->dev = dev;
4989     priv->dpsec_id = ls_dev->obj_desc.id;
4990 
4991     /* Get a handle for the DPSECI this interface is associated with */
4992     err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4993     if (err) {
4994         dev_err(dev, "dpseci_open() failed: %d\n", err);
4995         goto err_open;
4996     }
4997 
4998     err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4999                      &priv->minor_ver);
5000     if (err) {
5001         dev_err(dev, "dpseci_get_api_version() failed\n");
5002         goto err_get_vers;
5003     }
5004 
5005     dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5006 
5007     if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5008         err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5009         if (err) {
5010             dev_err(dev, "dpseci_reset() failed\n");
5011             goto err_get_vers;
5012         }
5013     }
5014 
5015     err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5016                     &priv->dpseci_attr);
5017     if (err) {
5018         dev_err(dev, "dpseci_get_attributes() failed\n");
5019         goto err_get_vers;
5020     }
5021 
5022     err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5023                   &priv->sec_attr);
5024     if (err) {
5025         dev_err(dev, "dpseci_get_sec_attr() failed\n");
5026         goto err_get_vers;
5027     }
5028 
5029     err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5030     if (err) {
5031         dev_err(dev, "dpaa2_dpseci_congestion_setup() failed\n");
5032         goto err_get_vers;
5033     }
5034 
5035     priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5036                   priv->dpseci_attr.num_tx_queues);
5037     if (priv->num_pairs > num_online_cpus()) {
5038         dev_warn(dev, "%d queues won't be used\n",
5039              priv->num_pairs - num_online_cpus());
5040         priv->num_pairs = num_online_cpus();
5041     }
5042 
5043     for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5044         err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5045                       &priv->rx_queue_attr[i]);
5046         if (err) {
5047             dev_err(dev, "dpseci_get_rx_queue() failed\n");
5048             goto err_get_rx_queue;
5049         }
5050     }
5051 
5052     for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5053         err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5054                       &priv->tx_queue_attr[i]);
5055         if (err) {
5056             dev_err(dev, "dpseci_get_tx_queue() failed\n");
5057             goto err_get_rx_queue;
5058         }
5059     }
5060 
5061     i = 0;
5062     for_each_online_cpu(cpu) {
5063         u8 j;
5064 
5065         j = i % priv->num_pairs;
5066 
5067         ppriv = per_cpu_ptr(priv->ppriv, cpu);
5068         ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5069 
5070         /*
5071          * Allow all cores to enqueue, while only some of them
5072          * will take part in dequeuing.
5073          */
5074         if (++i > priv->num_pairs)
5075             continue;
5076 
5077         ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5078         ppriv->prio = j;
5079 
5080         dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5081             priv->rx_queue_attr[j].fqid,
5082             priv->tx_queue_attr[j].fqid);
5083 
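             /* NAPI needs a struct net_device, so use a dummy per-cpu one */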
5084         ppriv->net_dev.dev = *dev;
5085         INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5086         netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
5087                      dpaa2_dpseci_poll,
5088                      DPAA2_CAAM_NAPI_WEIGHT);
5089     }
5090 
5091     return 0;
5092 
5093 err_get_rx_queue:
5094     dpaa2_dpseci_congestion_free(priv);
5095 err_get_vers:
5096     dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5097 err_open:
5098     return err;
5099 }
5100 
5101 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5102 {
5103     struct device *dev = priv->dev;
5104     struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5105     struct dpaa2_caam_priv_per_cpu *ppriv;
5106     int i;
5107 
5108     for (i = 0; i < priv->num_pairs; i++) {
5109         ppriv = per_cpu_ptr(priv->ppriv, i);
5110         napi_enable(&ppriv->napi);
5111     }
5112 
5113     return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5114 }
5115 
5116 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5117 {
5118     struct device *dev = priv->dev;
5119     struct dpaa2_caam_priv_per_cpu *ppriv;
5120     struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5121     int i, err = 0, enabled;
5122 
5123     err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5124     if (err) {
5125         dev_err(dev, "dpseci_disable() failed\n");
5126         return err;
5127     }
5128 
5129     err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5130     if (err) {
5131         dev_err(dev, "dpseci_is_enabled() failed\n");
5132         return err;
5133     }
5134 
5135     dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5136 
5137     for (i = 0; i < priv->num_pairs; i++) {
5138         ppriv = per_cpu_ptr(priv->ppriv, i);
5139         napi_disable(&ppriv->napi);
5140         netif_napi_del(&ppriv->napi);
5141     }
5142 
5143     return 0;
5144 }
5145 
5146 static struct list_head hash_list;
5147 
5148 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5149 {
5150     struct device *dev;
5151     struct dpaa2_caam_priv *priv;
5152     int i, err = 0;
5153     bool registered = false;
5154 
5155     /*
5156      * There is no way to query CAAM endianness - there is no direct register
5157      * space access, and the MC firmware does not provide this attribute.
5158      * All DPAA2-based SoCs have a little-endian CAAM, so hard-code this
5159      * property.
5160      */
5161     caam_little_end = true;
5162 
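         /* DPAA2-based SoCs are Layerscape parts, not i.MX */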
5163     caam_imx = false;
5164 
5165     dev = &dpseci_dev->dev;
5166 
5167     priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5168     if (!priv)
5169         return -ENOMEM;
5170 
5171     dev_set_drvdata(dev, priv);
5172 
5173     priv->domain = iommu_get_domain_for_dev(dev);
5174 
5175     qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5176                      0, SLAB_CACHE_DMA, NULL);
5177     if (!qi_cache) {
5178         dev_err(dev, "Can't allocate SEC cache\n");
5179         return -ENOMEM;
5180     }
5181 
5182     err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5183     if (err) {
5184         dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5185         goto err_dma_mask;
5186     }
5187 
5188     /* Obtain an MC portal */
5189     err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5190     if (err) {
5191         if (err == -ENXIO)
5192             err = -EPROBE_DEFER;
5193         else
5194             dev_err(dev, "MC portal allocation failed\n");
5195 
5196         goto err_dma_mask;
5197     }
5198 
5199     priv->ppriv = alloc_percpu(*priv->ppriv);
5200     if (!priv->ppriv) {
5201         dev_err(dev, "alloc_percpu() failed\n");
5202         err = -ENOMEM;
5203         goto err_alloc_ppriv;
5204     }
5205 
5206     /* DPSECI initialization */
5207     err = dpaa2_dpseci_setup(dpseci_dev);
5208     if (err) {
5209         dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5210         goto err_dpseci_setup;
5211     }
5212 
5213     /* DPIO */
5214     err = dpaa2_dpseci_dpio_setup(priv);
5215     if (err) {
5216         dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5217         goto err_dpio_setup;
5218     }
5219 
5220     /* DPSECI binding to DPIO */
5221     err = dpaa2_dpseci_bind(priv);
5222     if (err) {
5223         dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5224         goto err_bind;
5225     }
5226 
5227     /* DPSECI enable */
5228     err = dpaa2_dpseci_enable(priv);
5229     if (err) {
5230         dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5231         goto err_bind;
5232     }
5233 
5234     dpaa2_dpseci_debugfs_init(priv);
5235 
5236     /* register crypto algorithms the device supports */
5237     for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5238         struct caam_skcipher_alg *t_alg = driver_algs + i;
5239         u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5240 
5241         /* Skip DES algorithms if not supported by device */
5242         if (!priv->sec_attr.des_acc_num &&
5243             (alg_sel == OP_ALG_ALGSEL_3DES ||
5244              alg_sel == OP_ALG_ALGSEL_DES))
5245             continue;
5246 
5247         /* Skip AES algorithms if not supported by device */
5248         if (!priv->sec_attr.aes_acc_num &&
5249             alg_sel == OP_ALG_ALGSEL_AES)
5250             continue;
5251 
5252         /* Skip CHACHA20 algorithms if not supported by device */
5253         if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5254             !priv->sec_attr.ccha_acc_num)
5255             continue;
5256 
5257         t_alg->caam.dev = dev;
5258         caam_skcipher_alg_init(t_alg);
5259 
5260         err = crypto_register_skcipher(&t_alg->skcipher);
5261         if (err) {
5262             dev_warn(dev, "%s alg registration failed: %d\n",
5263                  t_alg->skcipher.base.cra_driver_name, err);
5264             continue;
5265         }
5266 
5267         t_alg->registered = true;
5268         registered = true;
5269     }
5270 
5271     for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5272         struct caam_aead_alg *t_alg = driver_aeads + i;
5273         u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5274                  OP_ALG_ALGSEL_MASK;
5275         u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5276                  OP_ALG_ALGSEL_MASK;
5277 
5278         /* Skip DES algorithms if not supported by device */
5279         if (!priv->sec_attr.des_acc_num &&
5280             (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5281              c1_alg_sel == OP_ALG_ALGSEL_DES))
5282             continue;
5283 
5284         /* Skip AES algorithms if not supported by device */
5285         if (!priv->sec_attr.aes_acc_num &&
5286             c1_alg_sel == OP_ALG_ALGSEL_AES)
5287             continue;
5288 
5289         /* Skip CHACHA20 algorithms if not supported by device */
5290         if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5291             !priv->sec_attr.ccha_acc_num)
5292             continue;
5293 
5294         /* Skip POLY1305 algorithms if not supported by device */
5295         if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5296             !priv->sec_attr.ptha_acc_num)
5297             continue;
5298 
5299         /*
5300          * Skip algorithms requiring message digests
5301          * if MD is not supported by the device.
5302          */
5303         if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5304             !priv->sec_attr.md_acc_num)
5305             continue;
5306 
5307         t_alg->caam.dev = dev;
5308         caam_aead_alg_init(t_alg);
5309 
5310         err = crypto_register_aead(&t_alg->aead);
5311         if (err) {
5312             dev_warn(dev, "%s alg registration failed: %d\n",
5313                  t_alg->aead.base.cra_driver_name, err);
5314             continue;
5315         }
5316 
5317         t_alg->registered = true;
5318         registered = true;
5319     }
5320     if (registered)
5321         dev_info(dev, "algorithms registered in /proc/crypto\n");
5322 
5323     /* register hash algorithms the device supports */
5324     INIT_LIST_HEAD(&hash_list);
5325 
5326     /*
5327      * Skip registration of any hashing algorithms if the MD block
5328      * is not present.
5329      */
5330     if (!priv->sec_attr.md_acc_num)
5331         return 0;
5332 
5333     for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5334         struct caam_hash_alg *t_alg;
5335         struct caam_hash_template *alg = driver_hash + i;
5336 
5337         /* register hmac version */
5338         t_alg = caam_hash_alloc(dev, alg, true);
5339         if (IS_ERR(t_alg)) {
5340             err = PTR_ERR(t_alg);
5341             dev_warn(dev, "%s hash alg allocation failed: %d\n",
5342                  alg->hmac_driver_name, err);
5343             continue;
5344         }
5345 
5346         err = crypto_register_ahash(&t_alg->ahash_alg);
5347         if (err) {
5348             dev_warn(dev, "%s alg registration failed: %d\n",
5349                  t_alg->ahash_alg.halg.base.cra_driver_name,
5350                  err);
5351             kfree(t_alg);
5352         } else {
5353             list_add_tail(&t_alg->entry, &hash_list);
5354         }
5355 
5356         /* register unkeyed version */
5357         t_alg = caam_hash_alloc(dev, alg, false);
5358         if (IS_ERR(t_alg)) {
5359             err = PTR_ERR(t_alg);
5360             dev_warn(dev, "%s alg allocation failed: %d\n",
5361                  alg->driver_name, err);
5362             continue;
5363         }
5364 
5365         err = crypto_register_ahash(&t_alg->ahash_alg);
5366         if (err) {
5367             dev_warn(dev, "%s alg registration failed: %d\n",
5368                  t_alg->ahash_alg.halg.base.cra_driver_name,
5369                  err);
5370             kfree(t_alg);
5371         } else {
5372             list_add_tail(&t_alg->entry, &hash_list);
5373         }
5374     }
5375     if (!list_empty(&hash_list))
5376         dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5377 
5378     return err;
5379 
5380 err_bind:
5381     dpaa2_dpseci_dpio_free(priv);
5382 err_dpio_setup:
5383     dpaa2_dpseci_free(priv);
5384 err_dpseci_setup:
5385     free_percpu(priv->ppriv);
5386 err_alloc_ppriv:
5387     fsl_mc_portal_free(priv->mc_io);
5388 err_dma_mask:
5389     kmem_cache_destroy(qi_cache);
5390 
5391     return err;
5392 }
5393 
5394 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5395 {
5396     struct device *dev;
5397     struct dpaa2_caam_priv *priv;
5398     int i;
5399 
5400     dev = &ls_dev->dev;
5401     priv = dev_get_drvdata(dev);
5402 
5403     dpaa2_dpseci_debugfs_exit(priv);
5404 
5405     for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5406         struct caam_aead_alg *t_alg = driver_aeads + i;
5407 
5408         if (t_alg->registered)
5409             crypto_unregister_aead(&t_alg->aead);
5410     }
5411 
5412     for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5413         struct caam_skcipher_alg *t_alg = driver_algs + i;
5414 
5415         if (t_alg->registered)
5416             crypto_unregister_skcipher(&t_alg->skcipher);
5417     }
5418 
5419     if (hash_list.next) {
5420         struct caam_hash_alg *t_hash_alg, *p;
5421 
5422         list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5423             crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5424             list_del(&t_hash_alg->entry);
5425             kfree(t_hash_alg);
5426         }
5427     }
5428 
5429     dpaa2_dpseci_disable(priv);
5430     dpaa2_dpseci_dpio_free(priv);
5431     dpaa2_dpseci_free(priv);
5432     free_percpu(priv->ppriv);
5433     fsl_mc_portal_free(priv->mc_io);
5434     kmem_cache_destroy(qi_cache);
5435 
5436     return 0;
5437 }
5438 
5439 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5440 {
5441     struct dpaa2_fd fd;
5442     struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5443     struct dpaa2_caam_priv_per_cpu *ppriv;
5444     int err = 0, i;
5445 
5446     if (IS_ERR(req))
5447         return PTR_ERR(req);
5448 
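         /* when congestion notifications are enabled, drop the request if the SEC is congested */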
5449     if (priv->cscn_mem) {
5450         dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5451                     DPAA2_CSCN_SIZE,
5452                     DMA_FROM_DEVICE);
5453         if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5454             dev_dbg_ratelimited(dev, "Dropping request\n");
5455             return -EBUSY;
5456         }
5457     }
5458 
5459     dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5460 
5461     req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5462                      DMA_BIDIRECTIONAL);
5463     if (dma_mapping_error(dev, req->fd_flt_dma)) {
5464         dev_err(dev, "DMA mapping error for QI enqueue request\n");
5465         return -EIO;
5466     }
5467 
5468     memset(&fd, 0, sizeof(fd));
5469     dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5470     dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5471     dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5472     dpaa2_fd_set_flc(&fd, req->flc_dma);
5473 
5474     ppriv = raw_cpu_ptr(priv->ppriv);
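         /* retry up to 2 * num_tx_queues times while the enqueue returns -EBUSY */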
5475     for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5476         err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5477                           &fd);
5478         if (err != -EBUSY)
5479             break;
5480 
5481         cpu_relax();
5482     }
5483 
5484     if (unlikely(err)) {
5485         dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5486         goto err_out;
5487     }
5488 
5489     return -EINPROGRESS;
5490 
5491 err_out:
5492     dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5493              DMA_BIDIRECTIONAL);
5494     return -EIO;
5495 }
5496 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5497 
5498 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5499     {
5500         .vendor = FSL_MC_VENDOR_FREESCALE,
5501         .obj_type = "dpseci",
5502     },
5503     { .vendor = 0x0 }
5504 };
5505 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5506 
5507 static struct fsl_mc_driver dpaa2_caam_driver = {
5508     .driver = {
5509         .name       = KBUILD_MODNAME,
5510         .owner      = THIS_MODULE,
5511     },
5512     .probe      = dpaa2_caam_probe,
5513     .remove     = dpaa2_caam_remove,
5514     .match_id_table = dpaa2_caam_match_id_table
5515 };
5516 
5517 MODULE_LICENSE("Dual BSD/GPL");
5518 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5519 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5520 
5521 module_fsl_mc_driver(dpaa2_caam_driver);