// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM  64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE   4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE     8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE    16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET   8
#define CONTROL_WORD_LEN    8
#define KEY2_OFFSET     48
#define DMA_MODE_FLAG(dma_mode) \
    (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE      12
#define SHA256_TRUNC_DIGEST_SIZE    16
#define SHA384_TRUNC_DIGEST_SIZE    24
#define SHA512_TRUNC_DIGEST_SIZE    32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
    enum otx_cptpf_type pf_type;
    struct pci_dev *dev;
    int num_queues;
};

struct cpt_device_table {
    atomic_t count;
    struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
    .count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
    .count = ATOMIC_INIT(0)
};

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
    int count, ret = 0;

    count = atomic_read(&se_devices.count);
    if (count < 1)
        return -ENODEV;

    *cpu_num = get_cpu();

    if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
        /*
         * On the OcteonTX platform there is one CPT instruction queue
         * bound to each VF. We get maximum performance if one CPT queue
         * is available for each cpu, otherwise CPT queues need to be
         * shared between cpus.
         */
        if (*cpu_num >= count)
            *cpu_num %= count;
        *pdev = se_devices.desc[*cpu_num].dev;
    } else {
        pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
        ret = -EINVAL;
    }
    put_cpu();

    return ret;
}
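
/*
 * Editor's note (illustrative, not from the original source): with, say,
 * four SE queues registered, the modulo above maps cpu 0..3 onto queues
 * 0..3 and cpu 6 back onto queue 2, so queues start being shared once
 * the number of online cpus exceeds the queue count.
 */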

static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
    struct otx_cpt_req_ctx *rctx;
    struct aead_request *req;
    struct crypto_aead *tfm;

    req = container_of(cpt_req->areq, struct aead_request, base);
    tfm = crypto_aead_reqtfm(req);
    rctx = aead_request_ctx(req);
    if (memcmp(rctx->fctx.hmac.s.hmac_calc,
           rctx->fctx.hmac.s.hmac_recv,
           crypto_aead_authsize(tfm)) != 0)
        return -EBADMSG;

    return 0;
}
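
/*
 * Editor's illustrative sketch (not part of the original driver): MAC
 * checks are commonly done with crypto_memneq(), which compares in
 * constant time, instead of memcmp(). This assumes <crypto/algapi.h>
 * is added to the includes above; the helper below is hypothetical and
 * unused by the driver.
 */
static inline int validate_hmac_cipher_null_ct(struct otx_cpt_req_info *cpt_req)
{
    struct otx_cpt_req_ctx *rctx;
    struct aead_request *req;
    struct crypto_aead *tfm;

    req = container_of(cpt_req->areq, struct aead_request, base);
    tfm = crypto_aead_reqtfm(req);
    rctx = aead_request_ctx(req);
    /* crypto_memneq() returns non-zero when the buffers differ */
    if (crypto_memneq(rctx->fctx.hmac.s.hmac_calc,
              rctx->fctx.hmac.s.hmac_recv,
              crypto_aead_authsize(tfm)))
        return -EBADMSG;

    return 0;
}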

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
    struct otx_cpt_info_buffer *cpt_info = arg2;
    struct crypto_async_request *areq = arg1;
    struct otx_cpt_req_info *cpt_req;
    struct pci_dev *pdev;

    if (!cpt_info)
        goto complete;

    cpt_req = cpt_info->req;
    if (!status) {
        /*
         * When the selected cipher is NULL we need to manually
         * verify whether the calculated hmac value matches the
         * received hmac value
         */
        if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
            !cpt_req->is_enc)
            status = validate_hmac_cipher_null(cpt_req);
    }
    pdev = cpt_info->pdev;
    do_request_cleanup(pdev, cpt_info);

complete:
    if (areq)
        areq->complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
    struct otx_cpt_req_info *req_info;
    struct skcipher_request *sreq;
    struct crypto_skcipher *stfm;
    struct otx_cpt_req_ctx *rctx;
    struct otx_cpt_enc_ctx *ctx;
    u32 start, ivsize;

    sreq = container_of(areq, struct skcipher_request, base);
    stfm = crypto_skcipher_reqtfm(sreq);
    ctx = crypto_skcipher_ctx(stfm);
    if (ctx->cipher_type == OTX_CPT_AES_CBC ||
        ctx->cipher_type == OTX_CPT_DES3_CBC) {
        rctx = skcipher_request_ctx(sreq);
        req_info = &rctx->cpt_req;
        ivsize = crypto_skcipher_ivsize(stfm);
        start = sreq->cryptlen - ivsize;

        if (req_info->is_enc) {
            scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
                         ivsize, 0);
        } else {
            if (sreq->src != sreq->dst) {
                scatterwalk_map_and_copy(sreq->iv, sreq->src,
                             start, ivsize, 0);
            } else {
                memcpy(sreq->iv, req_info->iv_out, ivsize);
                kfree(req_info->iv_out);
            }
        }
    }
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
    struct otx_cpt_info_buffer *cpt_info = arg2;
    struct crypto_async_request *areq = arg1;
    struct pci_dev *pdev;

    if (areq) {
        if (!status)
            output_iv_copyback(areq);
        if (cpt_info) {
            pdev = cpt_info->pdev;
            do_request_cleanup(pdev, cpt_info);
        }
        areq->complete(areq, status);
    }
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
                     struct scatterlist *inp_sg,
                     u32 nbytes, u32 *argcnt)
{
    req_info->req.dlen += nbytes;

    while (nbytes) {
        u32 len = min(nbytes, inp_sg->length);
        u8 *ptr = sg_virt(inp_sg);

        req_info->in[*argcnt].vptr = (void *)ptr;
        req_info->in[*argcnt].size = len;
        nbytes -= len;
        ++(*argcnt);
        inp_sg = sg_next(inp_sg);
    }
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
                      struct scatterlist *outp_sg,
                      u32 offset, u32 nbytes, u32 *argcnt)
{
    req_info->rlen += nbytes;

    while (nbytes) {
        u32 len = min(nbytes, outp_sg->length - offset);
        u8 *ptr = sg_virt(outp_sg);

        req_info->out[*argcnt].vptr = (void *) (ptr + offset);
        req_info->out[*argcnt].size = len;
        nbytes -= len;
        ++(*argcnt);
        offset = 0;
        outp_sg = sg_next(outp_sg);
    }
}
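
/*
 * Editor's note (illustrative, not from the original source): the two
 * helpers above flatten a scatterlist into the CPT gather/scatter
 * component arrays. For example, a 3000-byte payload spread over SG
 * entries of 2048 and 952 bytes yields two in[]/out[] components, one
 * per entry, with dlen/rlen accumulating the total request length.
 */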

static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
                 u32 *argcnt)
{
    struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
    struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
    struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
    int ivsize = crypto_skcipher_ivsize(stfm);
    u32 start = req->cryptlen - ivsize;
    gfp_t flags;

    flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
            GFP_KERNEL : GFP_ATOMIC;
    req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
    req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

    req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
    if (enc) {
        req_info->req.opcode.s.minor = 2;
    } else {
        req_info->req.opcode.s.minor = 3;
        if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
            ctx->cipher_type == OTX_CPT_DES3_CBC) &&
            req->src == req->dst) {
            req_info->iv_out = kmalloc(ivsize, flags);
            if (!req_info->iv_out)
                return -ENOMEM;

            scatterwalk_map_and_copy(req_info->iv_out, req->src,
                         start, ivsize, 0);
        }
    }
    /* Encryption data length */
    req_info->req.param1 = req->cryptlen;
    /* Authentication data length */
    req_info->req.param2 = 0;

    fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
    fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
    fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

    if (ctx->cipher_type == OTX_CPT_AES_XTS)
        memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
    else
        memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

    memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

    fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

    /*
     * Store the packet data information in the offset control
     * word (first 8 bytes)
     */
    req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
    req_info->in[*argcnt].size = CONTROL_WORD_LEN;
    req_info->req.dlen += CONTROL_WORD_LEN;
    ++(*argcnt);

    req_info->in[*argcnt].vptr = (u8 *)fctx;
    req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
    req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

    ++(*argcnt);

    return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
                    u32 enc_iv_len)
{
    struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0;
    int ret;

    ret = create_ctx_hdr(req, enc, &argcnt);
    if (ret)
        return ret;

    update_input_data(req_info, req->src, req->cryptlen, &argcnt);
    req_info->incnt = argcnt;

    return 0;
}

static inline void create_output_list(struct skcipher_request *req,
                      u32 enc_iv_len)
{
    struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0;

    /*
     * OUTPUT Buffer Processing
     * AES encryption/decryption output would be
     * received in the following format
     *
     * |------ IV ------|------ ENCRYPTED/DECRYPTED DATA ------|
     * [    16 bytes    ][     requested enc/dec data length   ]
     */
    update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
    req_info->outcnt = argcnt;
}

static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
    struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
    struct pci_dev *pdev;
    int status, cpu_num;

    /* Validate that the request doesn't exceed the maximum CPT supported size */
    if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
        return -E2BIG;

    /* Clear control words */
    rctx->ctrl_word.flags = 0;
    rctx->fctx.enc.enc_ctrl.flags = 0;

    status = create_input_list(req, enc, enc_iv_len);
    if (status)
        return status;
    create_output_list(req, enc_iv_len);

    status = get_se_device(&pdev, &cpu_num);
    if (status)
        return status;

    req_info->callback = (void *)otx_cpt_skcipher_callback;
    req_info->areq = &req->base;
    req_info->req_type = OTX_CPT_ENC_DEC_REQ;
    req_info->is_enc = enc;
    req_info->is_trunc_hmac = false;
    req_info->ctrl.s.grp = 0;

    /*
     * We perform an asynchronous send; once the request completes,
     * the driver notifies the caller through the registered
     * callback functions
     */
    status = otx_cpt_do_request(pdev, req_info, cpu_num);

    return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
    return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
    return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                       const u8 *key, u32 keylen)
{
    struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    const u8 *key2 = key + (keylen / 2);
    const u8 *key1 = key;
    int ret;

    ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
    if (ret)
        return ret;
    ctx->key_len = keylen;
    memcpy(ctx->enc_key, key1, keylen / 2);
    memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
    ctx->cipher_type = OTX_CPT_AES_XTS;
    switch (ctx->key_len) {
    case 2 * AES_KEYSIZE_128:
        ctx->key_type = OTX_CPT_AES_128_BIT;
        break;
    case 2 * AES_KEYSIZE_256:
        ctx->key_type = OTX_CPT_AES_256_BIT;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
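
/*
 * Editor's note (illustrative): in ctx->enc_key the two XTS key halves
 * sit at fixed offsets, e.g. for AES-128-XTS (32-byte key) key1 occupies
 * bytes 0-15 and key2 starts at KEY2_OFFSET (byte 48), which appears to
 * be the fixed key2 slot expected by the flexi-crypto key layout.
 */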

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
              u32 keylen, u8 cipher_type)
{
    struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    if (keylen != DES3_EDE_KEY_SIZE)
        return -EINVAL;

    ctx->key_len = keylen;
    ctx->cipher_type = cipher_type;

    memcpy(ctx->enc_key, key, keylen);

    return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
              u32 keylen, u8 cipher_type)
{
    struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    switch (keylen) {
    case AES_KEYSIZE_128:
        ctx->key_type = OTX_CPT_AES_128_BIT;
        break;
    case AES_KEYSIZE_192:
        ctx->key_type = OTX_CPT_AES_192_BIT;
        break;
    case AES_KEYSIZE_256:
        ctx->key_type = OTX_CPT_AES_256_BIT;
        break;
    default:
        return -EINVAL;
    }
    ctx->key_len = keylen;
    ctx->cipher_type = cipher_type;

    memcpy(ctx->enc_key, key, keylen);

    return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
                       const u8 *key, u32 keylen)
{
    return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
                       const u8 *key, u32 keylen)
{
    return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
                       const u8 *key, u32 keylen)
{
    return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
                        const u8 *key, u32 keylen)
{
    return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
                        const u8 *key, u32 keylen)
{
    return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
    struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

    memset(ctx, 0, sizeof(*ctx));
    /*
     * Additional memory for the skcipher_request is allocated
     * since the cryptd daemon uses this memory for its
     * request_ctx information
     */
    crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
                    sizeof(struct skcipher_request));

    return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    ctx->cipher_type = cipher_type;
    ctx->mac_type = mac_type;

    /*
     * When the selected cipher is NULL we use the HMAC opcode instead of
     * the FLEXICRYPTO opcode, therefore we don't need to use the HASH
     * algorithms for calculating ipad and opad
     */
    if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
            ctx->hashalg = crypto_alloc_shash("sha1", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX_CPT_SHA256:
            ctx->hashalg = crypto_alloc_shash("sha256", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX_CPT_SHA384:
            ctx->hashalg = crypto_alloc_shash("sha384", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;

        case OTX_CPT_SHA512:
            ctx->hashalg = crypto_alloc_shash("sha512", 0,
                              CRYPTO_ALG_ASYNC);
            if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);
            break;
        }
    }

    crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));

    return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
    return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    kfree(ctx->ipad);
    kfree(ctx->opad);
    if (ctx->hashalg)
        crypto_free_shash(ctx->hashalg);
    kfree(ctx->sdesc);
}

/*
 * Validate the requested Integrity Check Value (i.e. the authentication
 * tag) length
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
                     unsigned int authsize)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

    switch (ctx->mac_type) {
    case OTX_CPT_SHA1:
        if (authsize != SHA1_DIGEST_SIZE &&
            authsize != SHA1_TRUNC_DIGEST_SIZE)
            return -EINVAL;

        if (authsize == SHA1_TRUNC_DIGEST_SIZE)
            ctx->is_trunc_hmac = true;
        break;

    case OTX_CPT_SHA256:
        if (authsize != SHA256_DIGEST_SIZE &&
            authsize != SHA256_TRUNC_DIGEST_SIZE)
            return -EINVAL;

        if (authsize == SHA256_TRUNC_DIGEST_SIZE)
            ctx->is_trunc_hmac = true;
        break;

    case OTX_CPT_SHA384:
        if (authsize != SHA384_DIGEST_SIZE &&
            authsize != SHA384_TRUNC_DIGEST_SIZE)
            return -EINVAL;

        if (authsize == SHA384_TRUNC_DIGEST_SIZE)
            ctx->is_trunc_hmac = true;
        break;

    case OTX_CPT_SHA512:
        if (authsize != SHA512_DIGEST_SIZE &&
            authsize != SHA512_TRUNC_DIGEST_SIZE)
            return -EINVAL;

        if (authsize == SHA512_TRUNC_DIGEST_SIZE)
            ctx->is_trunc_hmac = true;
        break;

    case OTX_CPT_MAC_NULL:
        if (ctx->cipher_type == OTX_CPT_AES_GCM) {
            if (authsize != AES_GCM_ICV_SIZE)
                return -EINVAL;
        } else
            return -EINVAL;
        break;

    default:
        return -EINVAL;
    }

    tfm->authsize = authsize;
    return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
    struct otx_cpt_sdesc *sdesc;
    int size;

    size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
    sdesc = kmalloc(size, GFP_KERNEL);
    if (!sdesc)
        return NULL;

    sdesc->shash.tfm = alg;

    return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
    cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
    __be64 *dst = buf;
    u64 *src = buf;
    int i = 0;

    for (i = 0 ; i < len / 8; i++, src++, dst++)
        *dst = cpu_to_be64p(src);
}

static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
    struct sha512_state *sha512;
    struct sha256_state *sha256;
    struct sha1_state *sha1;

    switch (mac_type) {
    case OTX_CPT_SHA1:
        sha1 = (struct sha1_state *) in_pad;
        swap_data32(sha1->state, SHA1_DIGEST_SIZE);
        memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
        break;

    case OTX_CPT_SHA256:
        sha256 = (struct sha256_state *) in_pad;
        swap_data32(sha256->state, SHA256_DIGEST_SIZE);
        memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
        break;

    case OTX_CPT_SHA384:
    case OTX_CPT_SHA512:
        sha512 = (struct sha512_state *) in_pad;
        swap_data64(sha512->state, SHA512_DIGEST_SIZE);
        memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
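
/*
 * Editor's note (illustrative): copy_pad() hands the exported partial
 * hash state to the hardware with its state words converted to
 * big-endian, which appears to be the layout the CPT microcode expects
 * when resuming the inner/outer HMAC hashes; hence the
 * swap_data32()/swap_data64() helpers above.
 */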

static int aead_hmac_init(struct crypto_aead *cipher)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    int state_size = crypto_shash_statesize(ctx->hashalg);
    int ds = crypto_shash_digestsize(ctx->hashalg);
    int bs = crypto_shash_blocksize(ctx->hashalg);
    int authkeylen = ctx->auth_key_len;
    u8 *ipad = NULL, *opad = NULL;
    int ret = 0, icount = 0;

    ctx->sdesc = alloc_sdesc(ctx->hashalg);
    if (!ctx->sdesc)
        return -ENOMEM;

    ctx->ipad = kzalloc(bs, GFP_KERNEL);
    if (!ctx->ipad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    ctx->opad = kzalloc(bs, GFP_KERNEL);
    if (!ctx->opad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    ipad = kzalloc(state_size, GFP_KERNEL);
    if (!ipad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    opad = kzalloc(state_size, GFP_KERNEL);
    if (!opad) {
        ret = -ENOMEM;
        goto calc_fail;
    }

    if (authkeylen > bs) {
        ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
                      authkeylen, ipad);
        if (ret)
            goto calc_fail;

        authkeylen = ds;
    } else {
        memcpy(ipad, ctx->key, authkeylen);
    }

    memset(ipad + authkeylen, 0, bs - authkeylen);
    memcpy(opad, ipad, bs);

    for (icount = 0; icount < bs; icount++) {
        ipad[icount] ^= 0x36;
        opad[icount] ^= 0x5c;
    }

    /*
     * The partial hash calculated by the software algorithm is
     * exported for the IPAD and OPAD
     */

    /* IPAD Calculation */
    crypto_shash_init(&ctx->sdesc->shash);
    crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
    crypto_shash_export(&ctx->sdesc->shash, ipad);
    ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
    if (ret)
        goto calc_fail;

    /* OPAD Calculation */
    crypto_shash_init(&ctx->sdesc->shash);
    crypto_shash_update(&ctx->sdesc->shash, opad, bs);
    crypto_shash_export(&ctx->sdesc->shash, opad);
    ret = copy_pad(ctx->mac_type, ctx->opad, opad);
    if (ret)
        goto calc_fail;

    kfree(ipad);
    kfree(opad);

    return 0;

calc_fail:
    kfree(ctx->ipad);
    ctx->ipad = NULL;
    kfree(ctx->opad);
    ctx->opad = NULL;
    kfree(ipad);
    kfree(opad);
    kfree(ctx->sdesc);
    ctx->sdesc = NULL;

    return ret;
}
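
/*
 * Editor's note (illustrative): the function above performs the standard
 * RFC 2104 HMAC key preprocessing: a key longer than the block size is
 * first hashed down to the digest size, the key is zero-padded to the
 * block size, and the padded key is XORed with 0x36 (ipad) and 0x5c
 * (opad). The partial digests H(K ^ ipad) and H(K ^ opad) are then
 * exported so the hardware can resume the inner and outer hashes
 * without ever seeing the raw key.
 */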

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
                       const unsigned char *key,
                       unsigned int keylen)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    struct crypto_authenc_key_param *param;
    int enckeylen = 0, authkeylen = 0;
    struct rtattr *rta = (void *)key;
    int status = -EINVAL;

    if (!RTA_OK(rta, keylen))
        goto badkey;

    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        goto badkey;

    if (RTA_PAYLOAD(rta) < sizeof(*param))
        goto badkey;

    param = RTA_DATA(rta);
    enckeylen = be32_to_cpu(param->enckeylen);
    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);
    if (keylen < enckeylen)
        goto badkey;

    if (keylen > OTX_CPT_MAX_KEY_SIZE)
        goto badkey;

    authkeylen = keylen - enckeylen;
    memcpy(ctx->key, key, keylen);

    switch (enckeylen) {
    case AES_KEYSIZE_128:
        ctx->key_type = OTX_CPT_AES_128_BIT;
        break;
    case AES_KEYSIZE_192:
        ctx->key_type = OTX_CPT_AES_192_BIT;
        break;
    case AES_KEYSIZE_256:
        ctx->key_type = OTX_CPT_AES_256_BIT;
        break;
    default:
        /* Invalid key length */
        goto badkey;
    }

    ctx->enc_key_len = enckeylen;
    ctx->auth_key_len = authkeylen;

    status = aead_hmac_init(cipher);
    if (status)
        goto badkey;

    return 0;
badkey:
    return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
                        const unsigned char *key,
                        unsigned int keylen)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    struct crypto_authenc_key_param *param;
    struct rtattr *rta = (void *)key;
    int enckeylen = 0;

    if (!RTA_OK(rta, keylen))
        goto badkey;

    if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
        goto badkey;

    if (RTA_PAYLOAD(rta) < sizeof(*param))
        goto badkey;

    param = RTA_DATA(rta);
    enckeylen = be32_to_cpu(param->enckeylen);
    key += RTA_ALIGN(rta->rta_len);
    keylen -= RTA_ALIGN(rta->rta_len);
    if (enckeylen != 0)
        goto badkey;

    if (keylen > OTX_CPT_MAX_KEY_SIZE)
        goto badkey;

    memcpy(ctx->key, key, keylen);
    ctx->enc_key_len = enckeylen;
    ctx->auth_key_len = keylen;
    return 0;
badkey:
    return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
                       const unsigned char *key,
                       unsigned int keylen)
{
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

    /*
     * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
     * and salt (4 bytes)
     */
    switch (keylen) {
    case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX_CPT_AES_128_BIT;
        ctx->enc_key_len = AES_KEYSIZE_128;
        break;
    case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX_CPT_AES_192_BIT;
        ctx->enc_key_len = AES_KEYSIZE_192;
        break;
    case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
        ctx->key_type = OTX_CPT_AES_256_BIT;
        ctx->enc_key_len = AES_KEYSIZE_256;
        break;
    default:
        /* Invalid key and salt length */
        return -EINVAL;
    }

    /* Store encryption key and salt */
    memcpy(ctx->key, key, keylen);

    return 0;
}
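
/*
 * Editor's note (illustrative): per RFC 4106 the key blob passed to
 * setkey is the AES key followed by a 4-byte salt, so e.g. a 20-byte
 * blob selects AES-128 with bytes 16-19 kept as the salt. The salt is
 * later combined with the 8-byte per-request IV to form the 12-byte
 * GCM nonce.
 */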

static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
                      u32 *argcnt)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
    int mac_len = crypto_aead_authsize(tfm);
    int ds;

    rctx->ctrl_word.e.enc_data_offset = req->assoclen;

    switch (ctx->cipher_type) {
    case OTX_CPT_AES_CBC:
        fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
        /* Copy encryption key to context */
        memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
               ctx->enc_key_len);
        /* Copy IV to context */
        memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

        ds = crypto_shash_digestsize(ctx->hashalg);
        if (ctx->mac_type == OTX_CPT_SHA384)
            ds = SHA512_DIGEST_SIZE;
        if (ctx->ipad)
            memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
        if (ctx->opad)
            memcpy(fctx->hmac.e.opad, ctx->opad, ds);
        break;

    case OTX_CPT_AES_GCM:
        fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
        /* Copy encryption key to context */
        memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
        /* Copy salt to context */
        memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
               AES_GCM_SALT_SIZE);

        rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
        break;

    default:
        /* Unknown cipher type */
        return -EINVAL;
    }
    rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

    req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
    req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
    req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
    if (enc) {
        req_info->req.opcode.s.minor = 2;
        req_info->req.param1 = req->cryptlen;
        req_info->req.param2 = req->cryptlen + req->assoclen;
    } else {
        req_info->req.opcode.s.minor = 3;
        req_info->req.param1 = req->cryptlen - mac_len;
        req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
    }

    fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
    fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
    fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
    fctx->enc.enc_ctrl.e.mac_len = mac_len;
    fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

    /*
     * Store the packet data information in the offset control
     * word (first 8 bytes)
     */
    req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
    req_info->in[*argcnt].size = CONTROL_WORD_LEN;
    req_info->req.dlen += CONTROL_WORD_LEN;
    ++(*argcnt);

    req_info->in[*argcnt].vptr = (u8 *)fctx;
    req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
    req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
    ++(*argcnt);

    return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
                      u32 enc)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;

    req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
    req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
    req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
                 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
    req_info->is_trunc_hmac = ctx->is_trunc_hmac;

    req_info->req.opcode.s.minor = 0;
    req_info->req.param1 = ctx->auth_key_len;
    req_info->req.param2 = ctx->mac_type << 8;

    /* Add authentication key */
    req_info->in[*argcnt].vptr = ctx->key;
    req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
    req_info->req.dlen += round_up(ctx->auth_key_len, 8);
    ++(*argcnt);

    return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 inputlen = req->cryptlen + req->assoclen;
    u32 status, argcnt = 0;

    status = create_aead_ctx_hdr(req, enc, &argcnt);
    if (status)
        return status;
    update_input_data(req_info, req->src, inputlen, &argcnt);
    req_info->incnt = argcnt;

    return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
                      u32 mac_len)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 argcnt = 0, outputlen = 0;

    if (enc)
        outputlen = req->cryptlen + req->assoclen + mac_len;
    else
        outputlen = req->cryptlen + req->assoclen - mac_len;

    update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
    req_info->outcnt = argcnt;

    return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
                          u32 enc, u32 mac_len)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    u32 inputlen, argcnt = 0;

    if (enc)
        inputlen = req->cryptlen + req->assoclen;
    else
        inputlen = req->cryptlen + req->assoclen - mac_len;

    create_hmac_ctx_hdr(req, &argcnt, enc);
    update_input_data(req_info, req->src, inputlen, &argcnt);
    req_info->incnt = argcnt;

    return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
                           u32 enc, u32 mac_len)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    struct scatterlist *dst;
    u8 *ptr = NULL;
    int argcnt = 0, status, offset;
    u32 inputlen;

    if (enc)
        inputlen = req->cryptlen + req->assoclen;
    else
        inputlen = req->cryptlen + req->assoclen - mac_len;

    /*
     * If source and destination are different
     * then copy the payload to the destination
     */
    if (req->src != req->dst) {

        ptr = kmalloc(inputlen, (req_info->areq->flags &
                     CRYPTO_TFM_REQ_MAY_SLEEP) ?
                     GFP_KERNEL : GFP_ATOMIC);
        if (!ptr) {
            status = -ENOMEM;
            goto error;
        }

        status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
                       inputlen);
        if (status != inputlen) {
            status = -EINVAL;
            goto error_free;
        }
        status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
                         inputlen);
        if (status != inputlen) {
            status = -EINVAL;
            goto error_free;
        }
        kfree(ptr);
    }

    if (enc) {
        /*
         * In an encryption scenario the hmac needs
         * to be appended after the payload
         */
        dst = req->dst;
        offset = inputlen;
        while (offset >= dst->length) {
            offset -= dst->length;
            dst = sg_next(dst);
            if (!dst) {
                status = -ENOENT;
                goto error;
            }
        }

        update_output_data(req_info, dst, offset, mac_len, &argcnt);
    } else {
        /*
         * In a decryption scenario the hmac calculated for the
         * received payload needs to be compared with the hmac
         * received in the request
         */
        status = sg_copy_buffer(req->src, sg_nents(req->src),
                    rctx->fctx.hmac.s.hmac_recv, mac_len,
                    inputlen, true);
        if (status != mac_len) {
            status = -EINVAL;
            goto error;
        }

        req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
        req_info->out[argcnt].size = mac_len;
        argcnt++;
    }

    req_info->outcnt = argcnt;
    return 0;

error_free:
    kfree(ptr);
error:
    return status;
}

static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
    struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    struct pci_dev *pdev;
    u32 status, cpu_num;

    /* Clear control words */
    rctx->ctrl_word.flags = 0;
    rctx->fctx.enc.enc_ctrl.flags = 0;

    req_info->callback = otx_cpt_aead_callback;
    req_info->areq = &req->base;
    req_info->req_type = reg_type;
    req_info->is_enc = enc;
    req_info->is_trunc_hmac = false;

    switch (reg_type) {
    case OTX_CPT_AEAD_ENC_DEC_REQ:
        status = create_aead_input_list(req, enc);
        if (status)
            return status;
        status = create_aead_output_list(req, enc,
                         crypto_aead_authsize(tfm));
        if (status)
            return status;
        break;

    case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
        status = create_aead_null_input_list(req, enc,
                             crypto_aead_authsize(tfm));
        if (status)
            return status;
        status = create_aead_null_output_list(req, enc,
                        crypto_aead_authsize(tfm));
        if (status)
            return status;
        break;

    default:
        return -EINVAL;
    }

    /* Validate that the request doesn't exceed the maximum CPT supported size */
    if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
        req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
        return -E2BIG;

    status = get_se_device(&pdev, &cpu_num);
    if (status)
        return status;

    req_info->ctrl.s.grp = 0;

    status = otx_cpt_do_request(pdev, req_info, cpu_num);
    /*
     * We perform an asynchronous send; once the request completes,
     * the driver notifies the caller through the registered
     * callback functions
     */
    return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
    return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
    .base.cra_name = "xts(aes)",
    .base.cra_driver_name = "cpt_xts_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .ivsize = AES_BLOCK_SIZE,
    .min_keysize = 2 * AES_MIN_KEY_SIZE,
    .max_keysize = 2 * AES_MAX_KEY_SIZE,
    .setkey = otx_cpt_skcipher_xts_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "cbc(aes)",
    .base.cra_driver_name = "cpt_cbc_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .ivsize = AES_BLOCK_SIZE,
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = otx_cpt_skcipher_cbc_aes_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "ecb(aes)",
    .base.cra_driver_name = "cpt_ecb_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .ivsize = 0,
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = otx_cpt_skcipher_ecb_aes_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "cfb(aes)",
    .base.cra_driver_name = "cpt_cfb_aes",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = AES_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .ivsize = AES_BLOCK_SIZE,
    .min_keysize = AES_MIN_KEY_SIZE,
    .max_keysize = AES_MAX_KEY_SIZE,
    .setkey = otx_cpt_skcipher_cfb_aes_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "cbc(des3_ede)",
    .base.cra_driver_name = "cpt_cbc_des3_ede",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .min_keysize = DES3_EDE_KEY_SIZE,
    .max_keysize = DES3_EDE_KEY_SIZE,
    .ivsize = DES_BLOCK_SIZE,
    .setkey = otx_cpt_skcipher_cbc_des3_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
}, {
    .base.cra_name = "ecb(des3_ede)",
    .base.cra_driver_name = "cpt_ecb_des3_ede",
    .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
    .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
    .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
    .base.cra_alignmask = 7,
    .base.cra_priority = 4001,
    .base.cra_module = THIS_MODULE,

    .init = otx_cpt_enc_dec_init,
    .min_keysize = DES3_EDE_KEY_SIZE,
    .max_keysize = DES3_EDE_KEY_SIZE,
    .ivsize = 0,
    .setkey = otx_cpt_skcipher_ecb_des3_setkey,
    .encrypt = otx_cpt_skcipher_encrypt,
    .decrypt = otx_cpt_skcipher_decrypt,
} };
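
/*
 * Editor's illustrative sketch (not part of the original driver): how a
 * kernel caller would reach the "cbc(aes)" implementation above through
 * the generic skcipher API. The function name and the assumption that
 * buf/key/iv are linear kernel buffers are hypothetical; it would also
 * need <crypto/skcipher.h> and <linux/scatterlist.h>, and len is assumed
 * to be a multiple of AES_BLOCK_SIZE for CBC.
 */
static inline int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
                      const u8 *key, u8 *iv)
{
    struct crypto_skcipher *tfm;
    struct skcipher_request *req;
    struct scatterlist sg;
    DECLARE_CRYPTO_WAIT(wait);
    int ret;

    /* The crypto core picks the highest-priority "cbc(aes)" provider */
    tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
    if (ret)
        goto out_free_tfm;

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        ret = -ENOMEM;
        goto out_free_tfm;
    }

    /* In-place encryption: source and destination are the same buffer */
    sg_init_one(&sg, buf, len);
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                      CRYPTO_TFM_REQ_MAY_SLEEP,
                      crypto_req_done, &wait);
    skcipher_request_set_crypt(req, &sg, &sg, len, iv);

    /* Wait for the asynchronous completion delivered via the callback */
    ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

    skcipher_request_free(req);
out_free_tfm:
    crypto_free_skcipher(tfm);
    return ret;
}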

static struct aead_alg otx_cpt_aeads[] = { {
    .base = {
        .cra_name = "authenc(hmac(sha1),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_cbc_aes_sha1_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_encrypt,
    .decrypt = otx_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA1_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha256),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_cbc_aes_sha256_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_encrypt,
    .decrypt = otx_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA256_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha384),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_cbc_aes_sha384_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_encrypt,
    .decrypt = otx_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA384_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha512),cbc(aes))",
        .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_cbc_aes_sha512_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_encrypt,
    .decrypt = otx_cpt_aead_decrypt,
    .ivsize = AES_BLOCK_SIZE,
    .maxauthsize = SHA512_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha1_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_ecb_null_sha1_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_null_encrypt,
    .decrypt = otx_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA1_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha256_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_ecb_null_sha256_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_null_encrypt,
    .decrypt = otx_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA256_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha384_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_ecb_null_sha384_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_null_encrypt,
    .decrypt = otx_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA384_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
        .cra_driver_name = "cpt_hmac_sha512_ecb_null",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_ecb_null_sha512_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_ecb_null_sha_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_null_encrypt,
    .decrypt = otx_cpt_aead_null_decrypt,
    .ivsize = 0,
    .maxauthsize = SHA512_DIGEST_SIZE,
}, {
    .base = {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "cpt_rfc4106_gcm_aes",
        .cra_blocksize = 1,
        .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
        .cra_priority = 4001,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
    },
    .init = otx_cpt_aead_gcm_aes_init,
    .exit = otx_cpt_aead_exit,
    .setkey = otx_cpt_aead_gcm_aes_setkey,
    .setauthsize = otx_cpt_aead_set_authsize,
    .encrypt = otx_cpt_aead_encrypt,
    .decrypt = otx_cpt_aead_decrypt,
    .ivsize = AES_GCM_IV_SIZE,
    .maxauthsize = AES_GCM_ICV_SIZE,
} };
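
/*
 * Editor's note (illustrative): for "rfc4106(gcm(aes))" above, ivsize is
 * 8 because the request carries only the explicit 64-bit IV; the 4-byte
 * salt stored by setkey completes the 12-byte GCM nonce, and the 16-byte
 * maxauthsize matches the AES_GCM_ICV_SIZE enforced in set_authsize.
 */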

static inline int is_any_alg_used(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
        if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
            return true;
    for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
        if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
            return true;
    return false;
}

static inline int cpt_register_algs(void)
{
    int i, err = 0;

    if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
        for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
            otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

        err = crypto_register_skciphers(otx_cpt_skciphers,
                        ARRAY_SIZE(otx_cpt_skciphers));
        if (err)
            return err;
    }

    for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
        otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

    err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
    if (err) {
        crypto_unregister_skciphers(otx_cpt_skciphers,
                        ARRAY_SIZE(otx_cpt_skciphers));
        return err;
    }

    return 0;
}

static inline void cpt_unregister_algs(void)
{
    crypto_unregister_skciphers(otx_cpt_skciphers,
                    ARRAY_SIZE(otx_cpt_skciphers));
    crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
    struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
    struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

    if (ldesc->dev->devfn < rdesc->dev->devfn)
        return -1;
    if (ldesc->dev->devfn > rdesc->dev->devfn)
        return 1;
    return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
    struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
    struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

    swap(*ldesc, *rdesc);
}
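
/*
 * Editor's note (illustrative): compare_func()/swap_func() are passed to
 * sort() below so the device table stays ordered by PCI devfn; this
 * presumably gives get_se_device() a stable cpu-to-queue assignment that
 * does not depend on probe order.
 */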

int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
            enum otx_cptpf_type pf_type,
            enum otx_cptvf_type engine_type,
            int num_queues, int num_devices)
{
    int ret = 0;
    int count;

    mutex_lock(&mutex);
    switch (engine_type) {
    case OTX_CPT_SE_TYPES:
        count = atomic_read(&se_devices.count);
        if (count >= CPT_MAX_VF_NUM) {
            dev_err(&pdev->dev, "No space to add a new device\n");
            ret = -ENOSPC;
            goto err;
        }
        se_devices.desc[count].pf_type = pf_type;
        se_devices.desc[count].num_queues = num_queues;
        se_devices.desc[count++].dev = pdev;
        atomic_inc(&se_devices.count);

        if (atomic_read(&se_devices.count) == num_devices &&
            is_crypto_registered == false) {
            if (cpt_register_algs()) {
                dev_err(&pdev->dev,
                   "Error in registering crypto algorithms\n");
                ret = -EINVAL;
                goto err;
            }
            try_module_get(mod);
            is_crypto_registered = true;
        }
        sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
             compare_func, swap_func);
        break;

    case OTX_CPT_AE_TYPES:
        count = atomic_read(&ae_devices.count);
        if (count >= CPT_MAX_VF_NUM) {
            dev_err(&pdev->dev, "No space to add a new device\n");
            ret = -ENOSPC;
            goto err;
        }
        ae_devices.desc[count].pf_type = pf_type;
        ae_devices.desc[count].num_queues = num_queues;
        ae_devices.desc[count++].dev = pdev;
        atomic_inc(&ae_devices.count);
        sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
             compare_func, swap_func);
        break;

    default:
        dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
        ret = BAD_OTX_CPTVF_TYPE;
    }
err:
    mutex_unlock(&mutex);
    return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
             enum otx_cptvf_type engine_type)
{
    struct cpt_device_table *dev_tbl;
    bool dev_found = false;
    int i, j, count;

    mutex_lock(&mutex);

    dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
    count = atomic_read(&dev_tbl->count);
    for (i = 0; i < count; i++)
        if (pdev == dev_tbl->desc[i].dev) {
            for (j = i; j < count-1; j++)
                dev_tbl->desc[j] = dev_tbl->desc[j+1];
            dev_found = true;
            break;
        }

    if (!dev_found) {
        dev_err(&pdev->dev, "%s device not found\n", __func__);
        goto exit;
    }

    if (engine_type != OTX_CPT_AE_TYPES) {
        if (atomic_dec_and_test(&se_devices.count) &&
            !is_any_alg_used()) {
            cpt_unregister_algs();
            module_put(mod);
            is_crypto_registered = false;
        }
    } else
        atomic_dec(&ae_devices.count);
exit:
    mutex_unlock(&mutex);
}