// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"

/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
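/*
 * Bit 7 of the instruction's major opcode selects scatter-gather DMA
 * mode; DMA_MODE_FLAG() yields that bit when SG mode is requested so it
 * can be OR'ed into opcode.s.major when requests are built below.
 */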
#define DMA_MODE_FLAG(dma_mode) \
    (((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
    struct pci_dev *dev;
    int num_queues;
};

struct cpt_device_table {
    atomic_t count;
    struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
};

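/*
 * Table of symmetric-encryption (SE) capable devices. Entries are added
 * by otx2_cpt_crypto_init() and kept sorted by PCI devfn (see
 * compare_func() below) so that desc[0] is deterministic.
 */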
static struct cpt_device_table se_devices = {
    .count = ATOMIC_INIT(0)
};

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
    int count;

    count = atomic_read(&se_devices.count);
    if (count < 1)
        return -ENODEV;

    *cpu_num = get_cpu();
    /*
     * On the OcteonTX2 platform a CPT instruction queue is bound to
     * each local function (LF), and LFs can be attached to either the
     * PF or a VF, so we always use the first device. Performance is
     * best when one CPT queue is available per CPU; otherwise the CPT
     * queues have to be shared between CPUs.
     */
    if (*cpu_num >= se_devices.desc[0].num_queues)
        *cpu_num %= se_devices.desc[0].num_queues;
    *pdev = se_devices.desc[0].dev;

    put_cpu();

    return 0;
}

static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
{
    struct otx2_cpt_req_ctx *rctx;
    struct aead_request *req;
    struct crypto_aead *tfm;

    req = container_of(cpt_req->areq, struct aead_request, base);
    tfm = crypto_aead_reqtfm(req);
    rctx = aead_request_ctx(req);
    if (memcmp(rctx->fctx.hmac.s.hmac_calc,
           rctx->fctx.hmac.s.hmac_recv,
           crypto_aead_authsize(tfm)) != 0)
        return -EBADMSG;

    return 0;
}

static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
{
    struct otx2_cpt_inst_info *inst_info = arg2;
    struct crypto_async_request *areq = arg1;
    struct otx2_cpt_req_info *cpt_req;
    struct pci_dev *pdev;

    if (inst_info) {
        cpt_req = inst_info->req;
        if (!status) {
            /*
             * When the selected cipher is NULL we need to manually
             * verify that the calculated HMAC value matches the
             * received HMAC value.
             */
            if (cpt_req->req_type ==
                OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
                !cpt_req->is_enc)
                status = validate_hmac_cipher_null(cpt_req);
        }
        pdev = inst_info->pdev;
        otx2_cpt_info_destroy(pdev, inst_info);
    }
    if (areq)
        areq->complete(areq, status);
}

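/*
 * For CBC modes the IV of the next request is the last ciphertext block
 * of this one. After completion, copy that block back into sreq->iv:
 * from the destination buffer on encryption, from the source buffer on
 * out-of-place decryption, or from the copy saved in iv_out before an
 * in-place decryption overwrote it.
 */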
static void output_iv_copyback(struct crypto_async_request *areq)
{
    struct otx2_cpt_req_info *req_info;
    struct otx2_cpt_req_ctx *rctx;
    struct skcipher_request *sreq;
    struct crypto_skcipher *stfm;
    struct otx2_cpt_enc_ctx *ctx;
    u32 start, ivsize;

    sreq = container_of(areq, struct skcipher_request, base);
    stfm = crypto_skcipher_reqtfm(sreq);
    ctx = crypto_skcipher_ctx(stfm);
    if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
        ctx->cipher_type == OTX2_CPT_DES3_CBC) {
        rctx = skcipher_request_ctx(sreq);
        req_info = &rctx->cpt_req;
        ivsize = crypto_skcipher_ivsize(stfm);
        start = sreq->cryptlen - ivsize;

        if (req_info->is_enc) {
            scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
                         ivsize, 0);
        } else {
            if (sreq->src != sreq->dst) {
                scatterwalk_map_and_copy(sreq->iv, sreq->src,
                             start, ivsize, 0);
            } else {
                memcpy(sreq->iv, req_info->iv_out, ivsize);
                kfree(req_info->iv_out);
            }
        }
    }
}

static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
    struct otx2_cpt_inst_info *inst_info = arg2;
    struct crypto_async_request *areq = arg1;
    struct pci_dev *pdev;

    if (areq) {
        if (!status)
            output_iv_copyback(areq);
        if (inst_info) {
            pdev = inst_info->pdev;
            otx2_cpt_info_destroy(pdev, inst_info);
        }
        areq->complete(areq, status);
    }
}

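/*
 * Flatten a scatterlist into the request's in[]/out[] descriptor
 * arrays: each entry records a segment's virtual address and length,
 * and the running input (dlen) or expected output (rlen) length is
 * updated. update_output_data() additionally supports starting at an
 * offset into the first segment.
 */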
static inline void update_input_data(struct otx2_cpt_req_info *req_info,
                     struct scatterlist *inp_sg,
                     u32 nbytes, u32 *argcnt)
{
    req_info->req.dlen += nbytes;

    while (nbytes) {
        u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
        u8 *ptr = sg_virt(inp_sg);

        req_info->in[*argcnt].vptr = (void *)ptr;
        req_info->in[*argcnt].size = len;
        nbytes -= len;
        ++(*argcnt);
        inp_sg = sg_next(inp_sg);
    }
}

static inline void update_output_data(struct otx2_cpt_req_info *req_info,
                      struct scatterlist *outp_sg,
                      u32 offset, u32 nbytes, u32 *argcnt)
{
    u32 len, sg_len;
    u8 *ptr;

    req_info->rlen += nbytes;

    while (nbytes) {
        sg_len = outp_sg->length - offset;
        len = (nbytes < sg_len) ? nbytes : sg_len;
        ptr = sg_virt(outp_sg);

        req_info->out[*argcnt].vptr = (void *) (ptr + offset);
        req_info->out[*argcnt].size = len;
        nbytes -= len;
        ++(*argcnt);
        offset = 0;
        outp_sg = sg_next(outp_sg);
    }
}

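/*
 * Build the per-request context header for a symmetric cipher request:
 * the FLEXICRYPTO opcode, the 8-byte control word and the
 * otx2_cpt_fc_ctx carrying cipher type, key and IV. For in-place CBC
 * decryption the last ciphertext block is saved in iv_out first, since
 * the engine will have overwritten it by the time
 * output_iv_copyback() runs.
 */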
static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
                 u32 *argcnt)
{
    struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
    int ivsize = crypto_skcipher_ivsize(stfm);
    u32 start = req->cryptlen - ivsize;
    gfp_t flags;

    flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
            GFP_KERNEL : GFP_ATOMIC;
    req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
    req_info->ctrl.s.se_req = 1;

    req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
                DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
    if (enc) {
        req_info->req.opcode.s.minor = 2;
    } else {
        req_info->req.opcode.s.minor = 3;
        if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
            ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
            req->src == req->dst) {
            req_info->iv_out = kmalloc(ivsize, flags);
            if (!req_info->iv_out)
                return -ENOMEM;

            scatterwalk_map_and_copy(req_info->iv_out, req->src,
                         start, ivsize, 0);
        }
    }
    /* Encryption data length */
    req_info->req.param1 = req->cryptlen;
    /* Authentication data length */
    req_info->req.param2 = 0;

    fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
    fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
    fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;

    if (ctx->cipher_type == OTX2_CPT_AES_XTS)
        memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
    else
        memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

    memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

    cpu_to_be64s(&fctx->enc.enc_ctrl.u);

    /*
     * Store the packet data information in the offset control word
     * (the first 8 bytes of the input).
     */
    req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
    req_info->in[*argcnt].size = CONTROL_WORD_LEN;
    req_info->req.dlen += CONTROL_WORD_LEN;
    ++(*argcnt);

    req_info->in[*argcnt].vptr = (u8 *)fctx;
    req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
    req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);

    ++(*argcnt);

    return 0;
}

static inline int create_input_list(struct skcipher_request *req, u32 enc,
                    u32 enc_iv_len)
{
    struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0;
    int ret;

    ret = create_ctx_hdr(req, enc, &argcnt);
    if (ret)
        return ret;

    update_input_data(req_info, req->src, req->cryptlen, &argcnt);
    req_info->in_cnt = argcnt;

    return 0;
}

static inline void create_output_list(struct skcipher_request *req,
                      u32 enc_iv_len)
{
    struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0;

    /*
     * Output buffer processing: AES encryption/decryption output is
     * received in the following format:
     *
     * |------ IV ------|---- ENCRYPTED/DECRYPTED DATA ----|
     * |    16 bytes    |  request enc/dec data length     |
     */
    update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
    req_info->out_cnt = argcnt;
}

static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
    struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
    int ret;

    if (ctx->fbk_cipher) {
        skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
        skcipher_request_set_callback(&rctx->sk_fbk_req,
                          req->base.flags,
                          req->base.complete,
                          req->base.data);
        skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
                       req->dst, req->cryptlen, req->iv);
        ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
                   crypto_skcipher_decrypt(&rctx->sk_fbk_req);
    } else {
        ret = -EINVAL;
    }
    return ret;
}

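/*
 * Common encrypt/decrypt path: reject lengths the engine cannot handle,
 * divert oversized requests to the software fallback, build the input
 * and output descriptor lists, pick an SE device/queue for this CPU and
 * submit the instruction asynchronously.
 */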
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
    struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
    struct pci_dev *pdev;
    int status, cpu_num;

    if (req->cryptlen == 0)
        return 0;

    if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
        return -EINVAL;

    if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
        return skcipher_do_fallback(req, enc);

    /* Clear control words */
    rctx->ctrl_word.flags = 0;
    rctx->fctx.enc.enc_ctrl.u = 0;

    status = create_input_list(req, enc, enc_iv_len);
    if (status)
        return status;
    create_output_list(req, enc_iv_len);

    status = get_se_device(&pdev, &cpu_num);
    if (status)
        return status;

    req_info->callback = otx2_cpt_skcipher_callback;
    req_info->areq = &req->base;
    req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
    req_info->is_enc = enc;
    req_info->is_trunc_hmac = false;
    req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

    /*
     * The send is asynchronous; once the request completes, the
     * driver notifies us through the registered callback.
     */
    status = otx2_cpt_do_request(pdev, req_info, cpu_num);

    return status;
}

static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
{
    return cpt_enc_dec(req, true);
}

static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
{
    return cpt_enc_dec(req, false);
}

static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                       const u8 *key, u32 keylen)
{
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    const u8 *key2 = key + (keylen / 2);
    const u8 *key1 = key;
    int ret;

    ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
    if (ret)
        return ret;
    ctx->key_len = keylen;
    ctx->enc_align_len = 1;
    memcpy(ctx->enc_key, key1, keylen / 2);
    memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
    ctx->cipher_type = OTX2_CPT_AES_XTS;
    switch (ctx->key_len) {
    case 2 * AES_KEYSIZE_128:
        ctx->key_type = OTX2_CPT_AES_128_BIT;
        break;
    case 2 * AES_KEYSIZE_192:
        ctx->key_type = OTX2_CPT_AES_192_BIT;
        break;
    case 2 * AES_KEYSIZE_256:
        ctx->key_type = OTX2_CPT_AES_256_BIT;
        break;
    default:
        return -EINVAL;
    }
    return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
              u32 keylen, u8 cipher_type)
{
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    if (keylen != DES3_EDE_KEY_SIZE)
        return -EINVAL;

    ctx->key_len = keylen;
    ctx->cipher_type = cipher_type;
    ctx->enc_align_len = 8;

    memcpy(ctx->enc_key, key, keylen);

    return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
              u32 keylen, u8 cipher_type)
{
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    switch (keylen) {
    case AES_KEYSIZE_128:
        ctx->key_type = OTX2_CPT_AES_128_BIT;
        break;
    case AES_KEYSIZE_192:
        ctx->key_type = OTX2_CPT_AES_192_BIT;
        break;
    case AES_KEYSIZE_256:
        ctx->key_type = OTX2_CPT_AES_256_BIT;
        break;
    default:
        return -EINVAL;
    }
    if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
        ctx->enc_align_len = 16;
    else
        ctx->enc_align_len = 1;

    ctx->key_len = keylen;
    ctx->cipher_type = cipher_type;

    memcpy(ctx->enc_key, key, keylen);

    return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
                        const u8 *key, u32 keylen)
{
    return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
}

static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
                        const u8 *key, u32 keylen)
{
    return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
}

static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
                         const u8 *key, u32 keylen)
{
    return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
}

static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
                         const u8 *key, u32 keylen)
{
    return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
}

static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
                      struct crypto_alg *alg)
{
    if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
        ctx->fbk_cipher =
                crypto_alloc_skcipher(alg->cra_name, 0,
                              CRYPTO_ALG_ASYNC |
                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fbk_cipher)) {
            pr_err("%s() failed to allocate fallback for %s\n",
                __func__, alg->cra_name);
            return PTR_ERR(ctx->fbk_cipher);
        }
    }
    return 0;
}

static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
{
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
    struct crypto_alg *alg = tfm->__crt_alg;

    memset(ctx, 0, sizeof(*ctx));
    /*
     * Additional memory for a struct skcipher_request is allocated
     * since the cryptd daemon uses this memory for its request_ctx
     * information.
     */
    crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
                    sizeof(struct skcipher_request));

    return cpt_skcipher_fallback_init(ctx, alg);
}

static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
{
    struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    if (ctx->fbk_cipher) {
        crypto_free_skcipher(ctx->fbk_cipher);
        ctx->fbk_cipher = NULL;
    }
}

static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
                  struct crypto_alg *alg)
{
    if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
        ctx->fbk_cipher =
                crypto_alloc_aead(alg->cra_name, 0,
                          CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fbk_cipher)) {
            pr_err("%s() failed to allocate fallback for %s\n",
                __func__, alg->cra_name);
            return PTR_ERR(ctx->fbk_cipher);
        }
    }
    return 0;
}

static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
    struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
    struct crypto_alg *alg = tfm->__crt_alg;

    ctx->cipher_type = cipher_type;
    ctx->mac_type = mac_type;

    /*
     * When the selected cipher is NULL we use the HMAC opcode instead
     * of the FLEXICRYPTO opcode, so there is no need for hash
     * algorithms to calculate ipad and opad.
     */
    if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
        switch (ctx->mac_type) {
        case OTX2_CPT_SHA1:
            ctx->hashalg = crypto_alloc_shash("sha1", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX2_CPT_SHA256:
            ctx->hashalg = crypto_alloc_shash("sha256", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX2_CPT_SHA384:
            ctx->hashalg = crypto_alloc_shash("sha384", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX2_CPT_SHA512:
            ctx->hashalg = crypto_alloc_shash("sha512", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;
        }
    }
    switch (ctx->cipher_type) {
    case OTX2_CPT_AES_CBC:
    case OTX2_CPT_AES_ECB:
        ctx->enc_align_len = 16;
        break;
    case OTX2_CPT_DES3_CBC:
    case OTX2_CPT_DES3_ECB:
        ctx->enc_align_len = 8;
        break;
    case OTX2_CPT_AES_GCM:
    case OTX2_CPT_CIPHER_NULL:
        ctx->enc_align_len = 1;
        break;
    }
    crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));

    return cpt_aead_fallback_init(ctx, alg);
}

static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
}

static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    kfree(ctx->ipad);
    kfree(ctx->opad);
    if (ctx->hashalg)
        crypto_free_shash(ctx->hashalg);
    kfree(ctx->sdesc);

    if (ctx->fbk_cipher) {
        crypto_free_aead(ctx->fbk_cipher);
        ctx->fbk_cipher = NULL;
    }
}

static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
                      unsigned int authsize)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    if (crypto_rfc4106_check_authsize(authsize))
        return -EINVAL;

    tfm->authsize = authsize;
    /* Set authsize for fallback case */
    if (ctx->fbk_cipher)
        ctx->fbk_cipher->authsize = authsize;

    return 0;
}

static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
                      unsigned int authsize)
{
    tfm->authsize = authsize;

    return 0;
}

static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
                       unsigned int authsize)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    ctx->is_trunc_hmac = true;
    tfm->authsize = authsize;

    return 0;
}

static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
    struct otx2_cpt_sdesc *sdesc;
    int size;

    size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
    sdesc = kmalloc(size, GFP_KERNEL);
    if (!sdesc)
        return NULL;

    sdesc->shash.tfm = alg;

    return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
    cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
    u64 *src = buf;
    int i = 0;

    for (i = 0; i < len / 8; i++, src++)
        cpu_to_be64s(src);
}

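/*
 * Convert an exported shash partial state into the big-endian form the
 * engine consumes: SHA-1/SHA-256 state words are 32-bit, while
 * SHA-384/SHA-512 state words are 64-bit (SHA-384 uses the full
 * SHA-512 state size).
 */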
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
    struct sha512_state *sha512;
    struct sha256_state *sha256;
    struct sha1_state *sha1;

    switch (mac_type) {
    case OTX2_CPT_SHA1:
        sha1 = (struct sha1_state *) in_pad;
        swap_data32(sha1->state, SHA1_DIGEST_SIZE);
        memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
        break;

    case OTX2_CPT_SHA256:
        sha256 = (struct sha256_state *) in_pad;
        swap_data32(sha256->state, SHA256_DIGEST_SIZE);
        memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
        break;

    case OTX2_CPT_SHA384:
    case OTX2_CPT_SHA512:
        sha512 = (struct sha512_state *) in_pad;
        swap_data64(sha512->state, SHA512_DIGEST_SIZE);
        memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}

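/*
 * Precompute the HMAC inner and outer partial hashes (RFC 2104): the
 * authentication key is hashed down to the digest size if it exceeds
 * the block size, zero-padded to the block size, XORed with the 0x36
 * (ipad) and 0x5c (opad) constants, and one block of each is run
 * through the software hash. The exported partial states are then
 * handed to the hardware via the flexicrypto context.
 */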
static int aead_hmac_init(struct crypto_aead *cipher)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    int state_size = crypto_shash_statesize(ctx->hashalg);
    int ds = crypto_shash_digestsize(ctx->hashalg);
    int bs = crypto_shash_blocksize(ctx->hashalg);
    int authkeylen = ctx->auth_key_len;
    u8 *ipad = NULL, *opad = NULL;
    int ret = 0, icount = 0;

    ctx->sdesc = alloc_sdesc(ctx->hashalg);
    if (!ctx->sdesc)
        return -ENOMEM;

    ctx->ipad = kzalloc(bs, GFP_KERNEL);
    if (!ctx->ipad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    ctx->opad = kzalloc(bs, GFP_KERNEL);
    if (!ctx->opad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    ipad = kzalloc(state_size, GFP_KERNEL);
    if (!ipad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    opad = kzalloc(state_size, GFP_KERNEL);
    if (!opad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    if (authkeylen > bs) {
        ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
                      authkeylen, ipad);
        if (ret)
            goto calc_fail;

        authkeylen = ds;
    } else {
        memcpy(ipad, ctx->key, authkeylen);
    }

    memset(ipad + authkeylen, 0, bs - authkeylen);
    memcpy(opad, ipad, bs);

    for (icount = 0; icount < bs; icount++) {
        ipad[icount] ^= 0x36;
        opad[icount] ^= 0x5c;
    }

    /*
     * Retrieve the partial hashes calculated by the software
     * algorithm for IPAD and OPAD.
     */

    /* IPAD Calculation */
    crypto_shash_init(&ctx->sdesc->shash);
    crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
    crypto_shash_export(&ctx->sdesc->shash, ipad);
    ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
    if (ret)
        goto calc_fail;

    /* OPAD Calculation */
    crypto_shash_init(&ctx->sdesc->shash);
    crypto_shash_update(&ctx->sdesc->shash, opad, bs);
    crypto_shash_export(&ctx->sdesc->shash, opad);
    ret = copy_pad(ctx->mac_type, ctx->opad, opad);
    if (ret)
        goto calc_fail;

    kfree(ipad);
    kfree(opad);

    return 0;

calc_fail:
    kfree(ctx->ipad);
    ctx->ipad = NULL;
    kfree(ctx->opad);
    ctx->opad = NULL;
    kfree(ipad);
    kfree(opad);
    kfree(ctx->sdesc);
    ctx->sdesc = NULL;

    return ret;
}

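/*
 * authenc() keys arrive as an rtattr blob: a CRYPTO_AUTHENC_KEYA_PARAM
 * attribute carrying the encryption key length, followed by the
 * authentication key and then the encryption key. The blob is parsed
 * and validated here before the HMAC pads are precomputed.
 */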
static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
                        const unsigned char *key,
                        unsigned int keylen)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    struct crypto_authenc_key_param *param;
    int enckeylen = 0, authkeylen = 0;
    struct rtattr *rta = (void *)key;

    if (!RTA_OK(rta, keylen))
        return -EINVAL;

    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        return -EINVAL;

    if (RTA_PAYLOAD(rta) < sizeof(*param))
        return -EINVAL;

    param = RTA_DATA(rta);
    enckeylen = be32_to_cpu(param->enckeylen);
    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);
    if (keylen < enckeylen)
        return -EINVAL;

    if (keylen > OTX2_CPT_MAX_KEY_SIZE)
        return -EINVAL;

    authkeylen = keylen - enckeylen;
    memcpy(ctx->key, key, keylen);

    switch (enckeylen) {
    case AES_KEYSIZE_128:
        ctx->key_type = OTX2_CPT_AES_128_BIT;
        break;
    case AES_KEYSIZE_192:
        ctx->key_type = OTX2_CPT_AES_192_BIT;
        break;
    case AES_KEYSIZE_256:
        ctx->key_type = OTX2_CPT_AES_256_BIT;
        break;
    default:
        /* Invalid key length */
        return -EINVAL;
    }

    ctx->enc_key_len = enckeylen;
    ctx->auth_key_len = authkeylen;

    return aead_hmac_init(cipher);
}

static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
                         const unsigned char *key,
                         unsigned int keylen)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    struct crypto_authenc_key_param *param;
    struct rtattr *rta = (void *)key;
    int enckeylen = 0;

    if (!RTA_OK(rta, keylen))
        return -EINVAL;

    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        return -EINVAL;

    if (RTA_PAYLOAD(rta) < sizeof(*param))
        return -EINVAL;

    param = RTA_DATA(rta);
    enckeylen = be32_to_cpu(param->enckeylen);
    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);
    if (enckeylen != 0)
        return -EINVAL;

    if (keylen > OTX2_CPT_MAX_KEY_SIZE)
        return -EINVAL;

    memcpy(ctx->key, key, keylen);
    ctx->enc_key_len = enckeylen;
    ctx->auth_key_len = keylen;

    return 0;
}

static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
                    const unsigned char *key,
                    unsigned int keylen)
{
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

    /*
     * For AES GCM we expect the encryption key (16, 24 or 32 bytes)
     * followed by the salt (4 bytes).
     */
    switch (keylen) {
    case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX2_CPT_AES_128_BIT;
        ctx->enc_key_len = AES_KEYSIZE_128;
        break;
    case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX2_CPT_AES_192_BIT;
        ctx->enc_key_len = AES_KEYSIZE_192;
        break;
    case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX2_CPT_AES_256_BIT;
        ctx->enc_key_len = AES_KEYSIZE_256;
        break;
    default:
        /* Invalid key and salt length */
        return -EINVAL;
    }

    /* Store encryption key and salt */
    memcpy(ctx->key, key, keylen);

    return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
}

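/*
 * Build the AEAD context header. For the FLEXICRYPTO opcode, param1 is
 * the encryption data length and param2 the total authentication data
 * length; on decryption the trailing ICV (mac_len bytes) is subtracted
 * from both. AES-CBC takes its IV from the context (CPTR) while
 * AES-GCM takes it from the input data (DPTR), with the 4-byte salt
 * stored in the context IV field.
 */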
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
                      u32 *argcnt)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
    int mac_len = crypto_aead_authsize(tfm);
    int ds;

    rctx->ctrl_word.e.enc_data_offset = req->assoclen;

    switch (ctx->cipher_type) {
    case OTX2_CPT_AES_CBC:
        if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
            return -EINVAL;

        fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
        /* Copy encryption key to context */
        memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
               ctx->enc_key_len);
        /* Copy IV to context */
        memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

        ds = crypto_shash_digestsize(ctx->hashalg);
        if (ctx->mac_type == OTX2_CPT_SHA384)
            ds = SHA512_DIGEST_SIZE;
        if (ctx->ipad)
            memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
        if (ctx->opad)
            memcpy(fctx->hmac.e.opad, ctx->opad, ds);
        break;

    case OTX2_CPT_AES_GCM:
        if (crypto_ipsec_check_assoclen(req->assoclen))
            return -EINVAL;

        fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
        /* Copy encryption key to context */
        memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
        /* Copy salt to context */
        memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
               AES_GCM_SALT_SIZE);

        rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
        break;

    default:
        /* Unknown cipher type */
        return -EINVAL;
    }
    cpu_to_be64s(&rctx->ctrl_word.flags);

    req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
    req_info->ctrl.s.se_req = 1;
    req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
                 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
    if (enc) {
        req_info->req.opcode.s.minor = 2;
        req_info->req.param1 = req->cryptlen;
        req_info->req.param2 = req->cryptlen + req->assoclen;
    } else {
        req_info->req.opcode.s.minor = 3;
        req_info->req.param1 = req->cryptlen - mac_len;
        req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
    }

    fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
    fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
    fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
    fctx->enc.enc_ctrl.e.mac_len = mac_len;
    cpu_to_be64s(&fctx->enc.enc_ctrl.u);

    /*
     * Store the packet data information in the offset control word
     * (the first 8 bytes of the input).
     */
    req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
    req_info->in[*argcnt].size = CONTROL_WORD_LEN;
    req_info->req.dlen += CONTROL_WORD_LEN;
    ++(*argcnt);

    req_info->in[*argcnt].vptr = (u8 *)fctx;
    req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
    req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
    ++(*argcnt);

    return 0;
}

static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
                      u32 enc)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;

    req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
    req_info->ctrl.s.se_req = 1;
    req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
                 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
    req_info->is_trunc_hmac = ctx->is_trunc_hmac;

    req_info->req.opcode.s.minor = 0;
    req_info->req.param1 = ctx->auth_key_len;
    req_info->req.param2 = ctx->mac_type << 8;

    /* Add authentication key */
    req_info->in[*argcnt].vptr = ctx->key;
    req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
    req_info->req.dlen += round_up(ctx->auth_key_len, 8);
    ++(*argcnt);
}

static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 inputlen = req->cryptlen + req->assoclen;
    u32 status, argcnt = 0;

    status = create_aead_ctx_hdr(req, enc, &argcnt);
    if (status)
        return status;
    update_input_data(req_info, req->src, inputlen, &argcnt);
    req_info->in_cnt = argcnt;

    return 0;
}

static inline void create_aead_output_list(struct aead_request *req, u32 enc,
                       u32 mac_len)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0, outputlen = 0;

    if (enc)
        outputlen = req->cryptlen + req->assoclen + mac_len;
    else
        outputlen = req->cryptlen + req->assoclen - mac_len;

    update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
    req_info->out_cnt = argcnt;
}

static inline void create_aead_null_input_list(struct aead_request *req,
                           u32 enc, u32 mac_len)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    u32 inputlen, argcnt = 0;

    if (enc)
        inputlen = req->cryptlen + req->assoclen;
    else
        inputlen = req->cryptlen + req->assoclen - mac_len;

    create_hmac_ctx_hdr(req, &argcnt, enc);
    update_input_data(req_info, req->src, inputlen, &argcnt);
    req_info->in_cnt = argcnt;
}

static inline int create_aead_null_output_list(struct aead_request *req,
                           u32 enc, u32 mac_len)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    struct scatterlist *dst;
    u8 *ptr = NULL;
    int argcnt = 0, status, offset;
    u32 inputlen;

    if (enc)
        inputlen = req->cryptlen + req->assoclen;
    else
        inputlen = req->cryptlen + req->assoclen - mac_len;

    /*
     * If source and destination are different,
     * copy the payload to the destination.
     */
    if (req->src != req->dst) {

        ptr = kmalloc(inputlen, (req_info->areq->flags &
                     CRYPTO_TFM_REQ_MAY_SLEEP) ?
                     GFP_KERNEL : GFP_ATOMIC);
        if (!ptr)
            return -ENOMEM;

        status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
                       inputlen);
        if (status != inputlen) {
            status = -EINVAL;
            goto error_free;
        }
        status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
                         inputlen);
        if (status != inputlen) {
            status = -EINVAL;
            goto error_free;
        }
        kfree(ptr);
    }

    if (enc) {
        /*
         * In the encryption case the HMAC has to be
         * appended after the payload.
         */
        dst = req->dst;
        offset = inputlen;
        while (offset >= dst->length) {
            offset -= dst->length;
            dst = sg_next(dst);
            if (!dst)
                return -ENOENT;
        }

        update_output_data(req_info, dst, offset, mac_len, &argcnt);
    } else {
        /*
         * In the decryption case the HMAC calculated over the
         * received payload has to be compared with the received
         * HMAC.
         */
        status = sg_copy_buffer(req->src, sg_nents(req->src),
                    rctx->fctx.hmac.s.hmac_recv, mac_len,
                    inputlen, true);
        if (status != mac_len)
            return -EINVAL;

        req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
        req_info->out[argcnt].size = mac_len;
        argcnt++;
    }

    req_info->out_cnt = argcnt;
    return 0;

error_free:
    kfree(ptr);
    return status;
}

static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
    int ret;

    if (ctx->fbk_cipher) {
        /* Store the cipher tfm and then use the fallback tfm */
        aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
        aead_request_set_callback(&rctx->fbk_req, req->base.flags,
                      req->base.complete, req->base.data);
        aead_request_set_crypt(&rctx->fbk_req, req->src,
                       req->dst, req->cryptlen, req->iv);
        aead_request_set_ad(&rctx->fbk_req, req->assoclen);
        ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
                   crypto_aead_decrypt(&rctx->fbk_req);
    } else {
        ret = -EINVAL;
    }

    return ret;
}

static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
    struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct pci_dev *pdev;
    int status, cpu_num;

    /* Clear control words */
    rctx->ctrl_word.flags = 0;
    rctx->fctx.enc.enc_ctrl.u = 0;

    req_info->callback = otx2_cpt_aead_callback;
    req_info->areq = &req->base;
    req_info->req_type = reg_type;
    req_info->is_enc = enc;
    req_info->is_trunc_hmac = false;

    switch (reg_type) {
    case OTX2_CPT_AEAD_ENC_DEC_REQ:
        status = create_aead_input_list(req, enc);
        if (status)
            return status;
        create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
        break;

    case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
        create_aead_null_input_list(req, enc,
                        crypto_aead_authsize(tfm));
        status = create_aead_null_output_list(req, enc,
                        crypto_aead_authsize(tfm));
        if (status)
            return status;
        break;

    default:
        return -EINVAL;
    }
    if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
        return -EINVAL;

    if (!req_info->req.param2 ||
        (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
        (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
        return aead_do_fallback(req, enc);

    status = get_se_device(&pdev, &cpu_num);
    if (status)
        return status;

    req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

    /*
     * The send is asynchronous; once the request completes, the
     * driver notifies us through the registered callback.
     */
    return otx2_cpt_do_request(pdev, req_info, cpu_num);
}

static int otx2_cpt_aead_encrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx2_cpt_aead_decrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

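/*
 * Algorithm registrations. cra_priority 4001 puts these well above the
 * generic software implementations so they are picked by default; the
 * entries flagged CRYPTO_ALG_NEED_FALLBACK keep a software tfm around
 * for requests the engine cannot process (see the *_do_fallback
 * helpers above).
 */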
static struct skcipher_alg otx2_cpt_skciphers[] = { {
    .base.cra_name = "xts(aes)",
    .base.cra_driver_name = "cpt_xts_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx2_cpt_enc_dec_init,
    .exit = otx2_cpt_skcipher_exit,
    .ivsize = AES_BLOCK_SIZE,
    .min_keysize = 2 * AES_MIN_KEY_SIZE,
    .max_keysize = 2 * AES_MAX_KEY_SIZE,
    .setkey = otx2_cpt_skcipher_xts_setkey,
    .encrypt = otx2_cpt_skcipher_encrypt,
    .decrypt = otx2_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "cbc(aes)",
    .base.cra_driver_name = "cpt_cbc_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx2_cpt_enc_dec_init,
    .exit = otx2_cpt_skcipher_exit,
    .ivsize = AES_BLOCK_SIZE,
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = otx2_cpt_skcipher_cbc_aes_setkey,
    .encrypt = otx2_cpt_skcipher_encrypt,
    .decrypt = otx2_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "ecb(aes)",
    .base.cra_driver_name = "cpt_ecb_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx2_cpt_enc_dec_init,
    .exit = otx2_cpt_skcipher_exit,
    .ivsize = 0,
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = otx2_cpt_skcipher_ecb_aes_setkey,
    .encrypt = otx2_cpt_skcipher_encrypt,
    .decrypt = otx2_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "cbc(des3_ede)",
    .base.cra_driver_name = "cpt_cbc_des3_ede",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
    .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx2_cpt_enc_dec_init,
    .exit = otx2_cpt_skcipher_exit,
    .min_keysize = DES3_EDE_KEY_SIZE,
    .max_keysize = DES3_EDE_KEY_SIZE,
    .ivsize = DES_BLOCK_SIZE,
    .setkey = otx2_cpt_skcipher_cbc_des3_setkey,
    .encrypt = otx2_cpt_skcipher_encrypt,
    .decrypt = otx2_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "ecb(des3_ede)",
    .base.cra_driver_name = "cpt_ecb_des3_ede",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
    .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx2_cpt_enc_dec_init,
    .exit = otx2_cpt_skcipher_exit,
    .min_keysize = DES3_EDE_KEY_SIZE,
    .max_keysize = DES3_EDE_KEY_SIZE,
    .ivsize = 0,
    .setkey = otx2_cpt_skcipher_ecb_des3_setkey,
    .encrypt = otx2_cpt_skcipher_encrypt,
    .decrypt = otx2_cpt_skcipher_decrypt,
} };

static struct aead_alg otx2_cpt_aeads[] = { {
    .base = {
        .cra_name = "authenc(hmac(sha1),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_cbc_aes_sha1_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx2_cpt_aead_set_authsize,
    .encrypt = otx2_cpt_aead_encrypt,
    .decrypt = otx2_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA1_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha256),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_cbc_aes_sha256_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx2_cpt_aead_set_authsize,
    .encrypt = otx2_cpt_aead_encrypt,
    .decrypt = otx2_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA256_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha384),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_cbc_aes_sha384_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx2_cpt_aead_set_authsize,
    .encrypt = otx2_cpt_aead_encrypt,
    .decrypt = otx2_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA384_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha512),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_cbc_aes_sha512_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx2_cpt_aead_set_authsize,
    .encrypt = otx2_cpt_aead_encrypt,
    .decrypt = otx2_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA512_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha1_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_ecb_null_sha1_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx2_cpt_aead_null_set_authsize,
    .encrypt = otx2_cpt_aead_null_encrypt,
    .decrypt = otx2_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA1_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha256_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_ecb_null_sha256_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx2_cpt_aead_null_set_authsize,
    .encrypt = otx2_cpt_aead_null_encrypt,
    .decrypt = otx2_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA256_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha384_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_ecb_null_sha384_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx2_cpt_aead_null_set_authsize,
    .encrypt = otx2_cpt_aead_null_encrypt,
    .decrypt = otx2_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA384_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha512_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_ecb_null_sha512_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx2_cpt_aead_null_set_authsize,
    .encrypt = otx2_cpt_aead_null_encrypt,
    .decrypt = otx2_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA512_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "cpt_rfc4106_gcm_aes",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx2_cpt_aead_gcm_aes_init,
    .exit = otx2_cpt_aead_exit,
    .setkey = otx2_cpt_aead_gcm_aes_setkey,
    .setauthsize = otx2_cpt_aead_gcm_set_authsize,
    .encrypt = otx2_cpt_aead_encrypt,
    .decrypt = otx2_cpt_aead_decrypt,
    .ivsize = AES_GCM_IV_SIZE,
    .maxauthsize = AES_GCM_ICV_SIZE,
} };

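/*
 * Unregistering marks an algorithm CRYPTO_ALG_DEAD; clear the flag
 * before (re-)registering so the same static arrays can be reused
 * across unbind/bind cycles.
 */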
static inline int cpt_register_algs(void)
{
    int i, err = 0;

    for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
        otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

    err = crypto_register_skciphers(otx2_cpt_skciphers,
                    ARRAY_SIZE(otx2_cpt_skciphers));
    if (err)
        return err;

    for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
        otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

    err = crypto_register_aeads(otx2_cpt_aeads,
                    ARRAY_SIZE(otx2_cpt_aeads));
    if (err) {
        crypto_unregister_skciphers(otx2_cpt_skciphers,
                        ARRAY_SIZE(otx2_cpt_skciphers));
        return err;
    }

    return 0;
}

static inline void cpt_unregister_algs(void)
{
    crypto_unregister_skciphers(otx2_cpt_skciphers,
                    ARRAY_SIZE(otx2_cpt_skciphers));
    crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
}

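/*
 * sort() callbacks: keep the device table ordered by PCI devfn so the
 * device chosen by get_se_device() (desc[0]) stays stable regardless
 * of probe order.
 */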
static int compare_func(const void *lptr, const void *rptr)
{
    const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
    const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

    if (ldesc->dev->devfn < rdesc->dev->devfn)
        return -1;
    if (ldesc->dev->devfn > rdesc->dev->devfn)
        return 1;
    return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
    struct cpt_device_desc *ldesc = lptr;
    struct cpt_device_desc *rdesc = rptr;

    swap(*ldesc, *rdesc);
}

int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
             int num_queues, int num_devices)
{
    int ret = 0;
    int count;

    mutex_lock(&mutex);
    count = atomic_read(&se_devices.count);
    if (count >= OTX2_CPT_MAX_LFS_NUM) {
        dev_err(&pdev->dev, "No space to add a new device\n");
        ret = -ENOSPC;
        goto unlock;
    }
    se_devices.desc[count].num_queues = num_queues;
    se_devices.desc[count++].dev = pdev;
    atomic_inc(&se_devices.count);

    if (atomic_read(&se_devices.count) == num_devices &&
        is_crypto_registered == false) {
        if (cpt_register_algs()) {
            dev_err(&pdev->dev,
                "Error in registering crypto algorithms\n");
            ret = -EINVAL;
            goto unlock;
        }
        try_module_get(mod);
        is_crypto_registered = true;
    }
    sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
         compare_func, swap_func);

unlock:
    mutex_unlock(&mutex);
    return ret;
}

void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
{
    struct cpt_device_table *dev_tbl;
    bool dev_found = false;
    int i, j, count;

    mutex_lock(&mutex);

    dev_tbl = &se_devices;
    count = atomic_read(&dev_tbl->count);
    for (i = 0; i < count; i++) {
        if (pdev == dev_tbl->desc[i].dev) {
            for (j = i; j < count - 1; j++)
                dev_tbl->desc[j] = dev_tbl->desc[j + 1];
            dev_found = true;
            break;
        }
    }

    if (!dev_found) {
        dev_err(&pdev->dev, "%s device not found\n", __func__);
        goto unlock;
    }
    if (atomic_dec_and_test(&se_devices.count)) {
        cpt_unregister_algs();
        module_put(mod);
        is_crypto_registered = false;
    }

unlock:
    mutex_unlock(&mutex);
}