Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
0003 
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <crypto/algapi.h>
0007 #include <crypto/internal/aead.h>
0008 #include <crypto/authenc.h>
0009 #include <crypto/gcm.h>
0010 #include <linux/rtnetlink.h>
0011 #include <crypto/internal/des.h>
0012 #include "cc_driver.h"
0013 #include "cc_buffer_mgr.h"
0014 #include "cc_aead.h"
0015 #include "cc_request_mgr.h"
0016 #include "cc_hash.h"
0017 #include "cc_sram_mgr.h"
0018 
0019 #define template_aead   template_u.aead
0020 
0021 #define MAX_AEAD_SETKEY_SEQ 12
0022 #define MAX_AEAD_PROCESS_SEQ 23
0023 
0024 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
0025 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
0026 
0027 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
0028 
0029 struct cc_aead_handle {
0030     u32 sram_workspace_addr;
0031     struct list_head aead_list;
0032 };
0033 
0034 struct cc_hmac_s {
0035     u8 *padded_authkey;
0036     u8 *ipad_opad; /* IPAD, OPAD*/
0037     dma_addr_t padded_authkey_dma_addr;
0038     dma_addr_t ipad_opad_dma_addr;
0039 };
0040 
0041 struct cc_xcbc_s {
0042     u8 *xcbc_keys; /* K1,K2,K3 */
0043     dma_addr_t xcbc_keys_dma_addr;
0044 };
0045 
0046 struct cc_aead_ctx {
0047     struct cc_drvdata *drvdata;
0048     u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
0049     u8 *enckey;
0050     dma_addr_t enckey_dma_addr;
0051     union {
0052         struct cc_hmac_s hmac;
0053         struct cc_xcbc_s xcbc;
0054     } auth_state;
0055     unsigned int enc_keylen;
0056     unsigned int auth_keylen;
0057     unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
0058     unsigned int hash_len;
0059     enum drv_cipher_mode cipher_mode;
0060     enum cc_flow_mode flow_mode;
0061     enum drv_hash_mode auth_mode;
0062 };
0063 
0064 static void cc_aead_exit(struct crypto_aead *tfm)
0065 {
0066     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0067     struct device *dev = drvdata_to_dev(ctx->drvdata);
0068 
0069     dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
0070         crypto_tfm_alg_name(&tfm->base));
0071 
0072     /* Unmap enckey buffer */
0073     if (ctx->enckey) {
0074         dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
0075                   ctx->enckey_dma_addr);
0076         dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
0077             &ctx->enckey_dma_addr);
0078         ctx->enckey_dma_addr = 0;
0079         ctx->enckey = NULL;
0080     }
0081 
0082     if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
0083         struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
0084 
0085         if (xcbc->xcbc_keys) {
0086             dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
0087                       xcbc->xcbc_keys,
0088                       xcbc->xcbc_keys_dma_addr);
0089         }
0090         dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
0091             &xcbc->xcbc_keys_dma_addr);
0092         xcbc->xcbc_keys_dma_addr = 0;
0093         xcbc->xcbc_keys = NULL;
0094     } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
0095         struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
0096 
0097         if (hmac->ipad_opad) {
0098             dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
0099                       hmac->ipad_opad,
0100                       hmac->ipad_opad_dma_addr);
0101             dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
0102                 &hmac->ipad_opad_dma_addr);
0103             hmac->ipad_opad_dma_addr = 0;
0104             hmac->ipad_opad = NULL;
0105         }
0106         if (hmac->padded_authkey) {
0107             dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
0108                       hmac->padded_authkey,
0109                       hmac->padded_authkey_dma_addr);
0110             dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
0111                 &hmac->padded_authkey_dma_addr);
0112             hmac->padded_authkey_dma_addr = 0;
0113             hmac->padded_authkey = NULL;
0114         }
0115     }
0116 }
0117 
0118 static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
0119 {
0120     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0121 
0122     return cc_get_default_hash_len(ctx->drvdata);
0123 }
0124 
0125 static int cc_aead_init(struct crypto_aead *tfm)
0126 {
0127     struct aead_alg *alg = crypto_aead_alg(tfm);
0128     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0129     struct cc_crypto_alg *cc_alg =
0130             container_of(alg, struct cc_crypto_alg, aead_alg);
0131     struct device *dev = drvdata_to_dev(cc_alg->drvdata);
0132 
0133     dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
0134         crypto_tfm_alg_name(&tfm->base));
0135 
0136     /* Initialize modes in instance */
0137     ctx->cipher_mode = cc_alg->cipher_mode;
0138     ctx->flow_mode = cc_alg->flow_mode;
0139     ctx->auth_mode = cc_alg->auth_mode;
0140     ctx->drvdata = cc_alg->drvdata;
0141     crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
0142 
0143     /* Allocate key buffer, cache line aligned */
0144     ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
0145                      &ctx->enckey_dma_addr, GFP_KERNEL);
0146     if (!ctx->enckey) {
0147         dev_err(dev, "Failed allocating key buffer\n");
0148         goto init_failed;
0149     }
0150     dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
0151         ctx->enckey);
0152 
0153     /* Set default authlen value */
0154 
0155     if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
0156         struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
0157         const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
0158 
0159         /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
0160         /* (and temporary for user key - up to 256b) */
0161         xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
0162                              &xcbc->xcbc_keys_dma_addr,
0163                              GFP_KERNEL);
0164         if (!xcbc->xcbc_keys) {
0165             dev_err(dev, "Failed allocating buffer for XCBC keys\n");
0166             goto init_failed;
0167         }
0168     } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
0169         struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
0170         const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
0171         dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
0172 
0173         /* Allocate dma-coherent buffer for IPAD + OPAD */
0174         hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
0175                              &hmac->ipad_opad_dma_addr,
0176                              GFP_KERNEL);
0177 
0178         if (!hmac->ipad_opad) {
0179             dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
0180             goto init_failed;
0181         }
0182 
0183         dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
0184             hmac->ipad_opad);
0185 
0186         hmac->padded_authkey = dma_alloc_coherent(dev,
0187                               MAX_HMAC_BLOCK_SIZE,
0188                               pkey_dma,
0189                               GFP_KERNEL);
0190 
0191         if (!hmac->padded_authkey) {
0192             dev_err(dev, "failed to allocate padded_authkey\n");
0193             goto init_failed;
0194         }
0195     } else {
0196         ctx->auth_state.hmac.ipad_opad = NULL;
0197         ctx->auth_state.hmac.padded_authkey = NULL;
0198     }
0199     ctx->hash_len = cc_get_aead_hash_len(tfm);
0200 
0201     return 0;
0202 
0203 init_failed:
0204     cc_aead_exit(tfm);
0205     return -ENOMEM;
0206 }
0207 
0208 static void cc_aead_complete(struct device *dev, void *cc_req, int err)
0209 {
0210     struct aead_request *areq = (struct aead_request *)cc_req;
0211     struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
0212     struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
0213     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0214 
0215     /* BACKLOG notification */
0216     if (err == -EINPROGRESS)
0217         goto done;
0218 
0219     cc_unmap_aead_request(dev, areq);
0220 
0221     /* Restore ordinary iv pointer */
0222     areq->iv = areq_ctx->backup_iv;
0223 
0224     if (err)
0225         goto done;
0226 
0227     if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
0228         if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
0229                ctx->authsize) != 0) {
0230             dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
0231                 ctx->authsize, ctx->cipher_mode);
0232             /* In case of payload authentication failure, MUST NOT
0233              * revealed the decrypted message --> zero its memory.
0234              */
0235             sg_zero_buffer(areq->dst, sg_nents(areq->dst),
0236                        areq->cryptlen, areq->assoclen);
0237             err = -EBADMSG;
0238         }
0239     /*ENCRYPT*/
0240     } else if (areq_ctx->is_icv_fragmented) {
0241         u32 skip = areq->cryptlen + areq_ctx->dst_offset;
0242 
0243         cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
0244                    skip, (skip + ctx->authsize),
0245                    CC_SG_FROM_BUF);
0246     }
0247 done:
0248     aead_request_complete(areq, err);
0249 }
0250 
0251 static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
0252                 struct cc_aead_ctx *ctx)
0253 {
0254     /* Load the AES key */
0255     hw_desc_init(&desc[0]);
0256     /* We are using for the source/user key the same buffer
0257      * as for the output keys, * because after this key loading it
0258      * is not needed anymore
0259      */
0260     set_din_type(&desc[0], DMA_DLLI,
0261              ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
0262              NS_BIT);
0263     set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
0264     set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
0265     set_key_size_aes(&desc[0], ctx->auth_keylen);
0266     set_flow_mode(&desc[0], S_DIN_to_AES);
0267     set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
0268 
0269     hw_desc_init(&desc[1]);
0270     set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
0271     set_flow_mode(&desc[1], DIN_AES_DOUT);
0272     set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
0273               AES_KEYSIZE_128, NS_BIT, 0);
0274 
0275     hw_desc_init(&desc[2]);
0276     set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
0277     set_flow_mode(&desc[2], DIN_AES_DOUT);
0278     set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
0279                      + AES_KEYSIZE_128),
0280                   AES_KEYSIZE_128, NS_BIT, 0);
0281 
0282     hw_desc_init(&desc[3]);
0283     set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
0284     set_flow_mode(&desc[3], DIN_AES_DOUT);
0285     set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
0286                       + 2 * AES_KEYSIZE_128),
0287                   AES_KEYSIZE_128, NS_BIT, 0);
0288 
0289     return 4;
0290 }
0291 
0292 static unsigned int hmac_setkey(struct cc_hw_desc *desc,
0293                 struct cc_aead_ctx *ctx)
0294 {
0295     unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
0296     unsigned int digest_ofs = 0;
0297     unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
0298             DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
0299     unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
0300             CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
0301     struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
0302 
0303     unsigned int idx = 0;
0304     int i;
0305 
0306     /* calc derived HMAC key */
0307     for (i = 0; i < 2; i++) {
0308         /* Load hash initial state */
0309         hw_desc_init(&desc[idx]);
0310         set_cipher_mode(&desc[idx], hash_mode);
0311         set_din_sram(&desc[idx],
0312                  cc_larval_digest_addr(ctx->drvdata,
0313                            ctx->auth_mode),
0314                  digest_size);
0315         set_flow_mode(&desc[idx], S_DIN_to_HASH);
0316         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0317         idx++;
0318 
0319         /* Load the hash current length*/
0320         hw_desc_init(&desc[idx]);
0321         set_cipher_mode(&desc[idx], hash_mode);
0322         set_din_const(&desc[idx], 0, ctx->hash_len);
0323         set_flow_mode(&desc[idx], S_DIN_to_HASH);
0324         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0325         idx++;
0326 
0327         /* Prepare ipad key */
0328         hw_desc_init(&desc[idx]);
0329         set_xor_val(&desc[idx], hmac_pad_const[i]);
0330         set_cipher_mode(&desc[idx], hash_mode);
0331         set_flow_mode(&desc[idx], S_DIN_to_HASH);
0332         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
0333         idx++;
0334 
0335         /* Perform HASH update */
0336         hw_desc_init(&desc[idx]);
0337         set_din_type(&desc[idx], DMA_DLLI,
0338                  hmac->padded_authkey_dma_addr,
0339                  SHA256_BLOCK_SIZE, NS_BIT);
0340         set_cipher_mode(&desc[idx], hash_mode);
0341         set_xor_active(&desc[idx]);
0342         set_flow_mode(&desc[idx], DIN_HASH);
0343         idx++;
0344 
0345         /* Get the digset */
0346         hw_desc_init(&desc[idx]);
0347         set_cipher_mode(&desc[idx], hash_mode);
0348         set_dout_dlli(&desc[idx],
0349                   (hmac->ipad_opad_dma_addr + digest_ofs),
0350                   digest_size, NS_BIT, 0);
0351         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0352         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0353         set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0354         idx++;
0355 
0356         digest_ofs += digest_size;
0357     }
0358 
0359     return idx;
0360 }
0361 
0362 static int validate_keys_sizes(struct cc_aead_ctx *ctx)
0363 {
0364     struct device *dev = drvdata_to_dev(ctx->drvdata);
0365 
0366     dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
0367         ctx->enc_keylen, ctx->auth_keylen);
0368 
0369     switch (ctx->auth_mode) {
0370     case DRV_HASH_SHA1:
0371     case DRV_HASH_SHA256:
0372         break;
0373     case DRV_HASH_XCBC_MAC:
0374         if (ctx->auth_keylen != AES_KEYSIZE_128 &&
0375             ctx->auth_keylen != AES_KEYSIZE_192 &&
0376             ctx->auth_keylen != AES_KEYSIZE_256)
0377             return -ENOTSUPP;
0378         break;
0379     case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
0380         if (ctx->auth_keylen > 0)
0381             return -EINVAL;
0382         break;
0383     default:
0384         dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
0385         return -EINVAL;
0386     }
0387     /* Check cipher key size */
0388     if (ctx->flow_mode == S_DIN_to_DES) {
0389         if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
0390             dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
0391                 ctx->enc_keylen);
0392             return -EINVAL;
0393         }
0394     } else { /* Default assumed to be AES ciphers */
0395         if (ctx->enc_keylen != AES_KEYSIZE_128 &&
0396             ctx->enc_keylen != AES_KEYSIZE_192 &&
0397             ctx->enc_keylen != AES_KEYSIZE_256) {
0398             dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
0399                 ctx->enc_keylen);
0400             return -EINVAL;
0401         }
0402     }
0403 
0404     return 0; /* All tests of keys sizes passed */
0405 }
0406 
0407 /* This function prepers the user key so it can pass to the hmac processing
0408  * (copy to intenral buffer or hash in case of key longer than block
0409  */
0410 static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
0411                  unsigned int keylen)
0412 {
0413     dma_addr_t key_dma_addr = 0;
0414     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0415     struct device *dev = drvdata_to_dev(ctx->drvdata);
0416     u32 larval_addr;
0417     struct cc_crypto_req cc_req = {};
0418     unsigned int blocksize;
0419     unsigned int digestsize;
0420     unsigned int hashmode;
0421     unsigned int idx = 0;
0422     int rc = 0;
0423     u8 *key = NULL;
0424     struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
0425     dma_addr_t padded_authkey_dma_addr =
0426         ctx->auth_state.hmac.padded_authkey_dma_addr;
0427 
0428     switch (ctx->auth_mode) { /* auth_key required and >0 */
0429     case DRV_HASH_SHA1:
0430         blocksize = SHA1_BLOCK_SIZE;
0431         digestsize = SHA1_DIGEST_SIZE;
0432         hashmode = DRV_HASH_HW_SHA1;
0433         break;
0434     case DRV_HASH_SHA256:
0435     default:
0436         blocksize = SHA256_BLOCK_SIZE;
0437         digestsize = SHA256_DIGEST_SIZE;
0438         hashmode = DRV_HASH_HW_SHA256;
0439     }
0440 
0441     if (keylen != 0) {
0442 
0443         key = kmemdup(authkey, keylen, GFP_KERNEL);
0444         if (!key)
0445             return -ENOMEM;
0446 
0447         key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
0448         if (dma_mapping_error(dev, key_dma_addr)) {
0449             dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
0450                 key, keylen);
0451             kfree_sensitive(key);
0452             return -ENOMEM;
0453         }
0454         if (keylen > blocksize) {
0455             /* Load hash initial state */
0456             hw_desc_init(&desc[idx]);
0457             set_cipher_mode(&desc[idx], hashmode);
0458             larval_addr = cc_larval_digest_addr(ctx->drvdata,
0459                                 ctx->auth_mode);
0460             set_din_sram(&desc[idx], larval_addr, digestsize);
0461             set_flow_mode(&desc[idx], S_DIN_to_HASH);
0462             set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0463             idx++;
0464 
0465             /* Load the hash current length*/
0466             hw_desc_init(&desc[idx]);
0467             set_cipher_mode(&desc[idx], hashmode);
0468             set_din_const(&desc[idx], 0, ctx->hash_len);
0469             set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
0470             set_flow_mode(&desc[idx], S_DIN_to_HASH);
0471             set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0472             idx++;
0473 
0474             hw_desc_init(&desc[idx]);
0475             set_din_type(&desc[idx], DMA_DLLI,
0476                      key_dma_addr, keylen, NS_BIT);
0477             set_flow_mode(&desc[idx], DIN_HASH);
0478             idx++;
0479 
0480             /* Get hashed key */
0481             hw_desc_init(&desc[idx]);
0482             set_cipher_mode(&desc[idx], hashmode);
0483             set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
0484                       digestsize, NS_BIT, 0);
0485             set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0486             set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0487             set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0488             set_cipher_config0(&desc[idx],
0489                        HASH_DIGEST_RESULT_LITTLE_ENDIAN);
0490             idx++;
0491 
0492             hw_desc_init(&desc[idx]);
0493             set_din_const(&desc[idx], 0, (blocksize - digestsize));
0494             set_flow_mode(&desc[idx], BYPASS);
0495             set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
0496                       digestsize), (blocksize - digestsize),
0497                       NS_BIT, 0);
0498             idx++;
0499         } else {
0500             hw_desc_init(&desc[idx]);
0501             set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
0502                      keylen, NS_BIT);
0503             set_flow_mode(&desc[idx], BYPASS);
0504             set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
0505                       keylen, NS_BIT, 0);
0506             idx++;
0507 
0508             if ((blocksize - keylen) != 0) {
0509                 hw_desc_init(&desc[idx]);
0510                 set_din_const(&desc[idx], 0,
0511                           (blocksize - keylen));
0512                 set_flow_mode(&desc[idx], BYPASS);
0513                 set_dout_dlli(&desc[idx],
0514                           (padded_authkey_dma_addr +
0515                            keylen),
0516                           (blocksize - keylen), NS_BIT, 0);
0517                 idx++;
0518             }
0519         }
0520     } else {
0521         hw_desc_init(&desc[idx]);
0522         set_din_const(&desc[idx], 0, (blocksize - keylen));
0523         set_flow_mode(&desc[idx], BYPASS);
0524         set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
0525                   blocksize, NS_BIT, 0);
0526         idx++;
0527     }
0528 
0529     rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
0530     if (rc)
0531         dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0532 
0533     if (key_dma_addr)
0534         dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
0535 
0536     kfree_sensitive(key);
0537 
0538     return rc;
0539 }
0540 
0541 static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
0542               unsigned int keylen)
0543 {
0544     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0545     struct cc_crypto_req cc_req = {};
0546     struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
0547     unsigned int seq_len = 0;
0548     struct device *dev = drvdata_to_dev(ctx->drvdata);
0549     const u8 *enckey, *authkey;
0550     int rc;
0551 
0552     dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
0553         ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
0554 
0555     /* STAT_PHASE_0: Init and sanity checks */
0556 
0557     if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
0558         struct crypto_authenc_keys keys;
0559 
0560         rc = crypto_authenc_extractkeys(&keys, key, keylen);
0561         if (rc)
0562             return rc;
0563         enckey = keys.enckey;
0564         authkey = keys.authkey;
0565         ctx->enc_keylen = keys.enckeylen;
0566         ctx->auth_keylen = keys.authkeylen;
0567 
0568         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
0569             /* the nonce is stored in bytes at end of key */
0570             if (ctx->enc_keylen <
0571                 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
0572                 return -EINVAL;
0573             /* Copy nonce from last 4 bytes in CTR key to
0574              *  first 4 bytes in CTR IV
0575              */
0576             memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
0577                    CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
0578             /* Set CTR key size */
0579             ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
0580         }
0581     } else { /* non-authenc - has just one key */
0582         enckey = key;
0583         authkey = NULL;
0584         ctx->enc_keylen = keylen;
0585         ctx->auth_keylen = 0;
0586     }
0587 
0588     rc = validate_keys_sizes(ctx);
0589     if (rc)
0590         return rc;
0591 
0592     /* STAT_PHASE_1: Copy key to ctx */
0593 
0594     /* Get key material */
0595     memcpy(ctx->enckey, enckey, ctx->enc_keylen);
0596     if (ctx->enc_keylen == 24)
0597         memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
0598     if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
0599         memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
0600                ctx->auth_keylen);
0601     } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
0602         rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
0603         if (rc)
0604             return rc;
0605     }
0606 
0607     /* STAT_PHASE_2: Create sequence */
0608 
0609     switch (ctx->auth_mode) {
0610     case DRV_HASH_SHA1:
0611     case DRV_HASH_SHA256:
0612         seq_len = hmac_setkey(desc, ctx);
0613         break;
0614     case DRV_HASH_XCBC_MAC:
0615         seq_len = xcbc_setkey(desc, ctx);
0616         break;
0617     case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
0618         break; /* No auth. key setup */
0619     default:
0620         dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
0621         return -ENOTSUPP;
0622     }
0623 
0624     /* STAT_PHASE_3: Submit sequence to HW */
0625 
0626     if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
0627         rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
0628         if (rc) {
0629             dev_err(dev, "send_request() failed (rc=%d)\n", rc);
0630             return rc;
0631         }
0632     }
0633 
0634     /* Update STAT_PHASE_3 */
0635     return rc;
0636 }
0637 
0638 static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
0639                    unsigned int keylen)
0640 {
0641     struct crypto_authenc_keys keys;
0642     int err;
0643 
0644     err = crypto_authenc_extractkeys(&keys, key, keylen);
0645     if (unlikely(err))
0646         return err;
0647 
0648     err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
0649           cc_aead_setkey(aead, key, keylen);
0650 
0651     memzero_explicit(&keys, sizeof(keys));
0652     return err;
0653 }
0654 
0655 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
0656                  unsigned int keylen)
0657 {
0658     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0659 
0660     if (keylen < 3)
0661         return -EINVAL;
0662 
0663     keylen -= 3;
0664     memcpy(ctx->ctr_nonce, key + keylen, 3);
0665 
0666     return cc_aead_setkey(tfm, key, keylen);
0667 }
0668 
0669 static int cc_aead_setauthsize(struct crypto_aead *authenc,
0670                    unsigned int authsize)
0671 {
0672     struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
0673     struct device *dev = drvdata_to_dev(ctx->drvdata);
0674 
0675     /* Unsupported auth. sizes */
0676     if (authsize == 0 ||
0677         authsize > crypto_aead_maxauthsize(authenc)) {
0678         return -ENOTSUPP;
0679     }
0680 
0681     ctx->authsize = authsize;
0682     dev_dbg(dev, "authlen=%d\n", ctx->authsize);
0683 
0684     return 0;
0685 }
0686 
0687 static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
0688                       unsigned int authsize)
0689 {
0690     switch (authsize) {
0691     case 8:
0692     case 12:
0693     case 16:
0694         break;
0695     default:
0696         return -EINVAL;
0697     }
0698 
0699     return cc_aead_setauthsize(authenc, authsize);
0700 }
0701 
0702 static int cc_ccm_setauthsize(struct crypto_aead *authenc,
0703                   unsigned int authsize)
0704 {
0705     switch (authsize) {
0706     case 4:
0707     case 6:
0708     case 8:
0709     case 10:
0710     case 12:
0711     case 14:
0712     case 16:
0713         break;
0714     default:
0715         return -EINVAL;
0716     }
0717 
0718     return cc_aead_setauthsize(authenc, authsize);
0719 }
0720 
0721 static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
0722                   struct cc_hw_desc desc[], unsigned int *seq_size)
0723 {
0724     struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
0725     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0726     struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
0727     enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
0728     unsigned int idx = *seq_size;
0729     struct device *dev = drvdata_to_dev(ctx->drvdata);
0730 
0731     switch (assoc_dma_type) {
0732     case CC_DMA_BUF_DLLI:
0733         dev_dbg(dev, "ASSOC buffer type DLLI\n");
0734         hw_desc_init(&desc[idx]);
0735         set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
0736                  areq_ctx->assoclen, NS_BIT);
0737         set_flow_mode(&desc[idx], flow_mode);
0738         if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
0739             areq_ctx->cryptlen > 0)
0740             set_din_not_last_indication(&desc[idx]);
0741         break;
0742     case CC_DMA_BUF_MLLI:
0743         dev_dbg(dev, "ASSOC buffer type MLLI\n");
0744         hw_desc_init(&desc[idx]);
0745         set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
0746                  areq_ctx->assoc.mlli_nents, NS_BIT);
0747         set_flow_mode(&desc[idx], flow_mode);
0748         if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
0749             areq_ctx->cryptlen > 0)
0750             set_din_not_last_indication(&desc[idx]);
0751         break;
0752     case CC_DMA_BUF_NULL:
0753     default:
0754         dev_err(dev, "Invalid ASSOC buffer type\n");
0755     }
0756 
0757     *seq_size = (++idx);
0758 }
0759 
0760 static void cc_proc_authen_desc(struct aead_request *areq,
0761                 unsigned int flow_mode,
0762                 struct cc_hw_desc desc[],
0763                 unsigned int *seq_size, int direct)
0764 {
0765     struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
0766     enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
0767     unsigned int idx = *seq_size;
0768     struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
0769     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0770     struct device *dev = drvdata_to_dev(ctx->drvdata);
0771 
0772     switch (data_dma_type) {
0773     case CC_DMA_BUF_DLLI:
0774     {
0775         struct scatterlist *cipher =
0776             (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
0777             areq_ctx->dst_sgl : areq_ctx->src_sgl;
0778 
0779         unsigned int offset =
0780             (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
0781             areq_ctx->dst_offset : areq_ctx->src_offset;
0782         dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
0783         hw_desc_init(&desc[idx]);
0784         set_din_type(&desc[idx], DMA_DLLI,
0785                  (sg_dma_address(cipher) + offset),
0786                  areq_ctx->cryptlen, NS_BIT);
0787         set_flow_mode(&desc[idx], flow_mode);
0788         break;
0789     }
0790     case CC_DMA_BUF_MLLI:
0791     {
0792         /* DOUBLE-PASS flow (as default)
0793          * assoc. + iv + data -compact in one table
0794          * if assoclen is ZERO only IV perform
0795          */
0796         u32 mlli_addr = areq_ctx->assoc.sram_addr;
0797         u32 mlli_nents = areq_ctx->assoc.mlli_nents;
0798 
0799         if (areq_ctx->is_single_pass) {
0800             if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
0801                 mlli_addr = areq_ctx->dst.sram_addr;
0802                 mlli_nents = areq_ctx->dst.mlli_nents;
0803             } else {
0804                 mlli_addr = areq_ctx->src.sram_addr;
0805                 mlli_nents = areq_ctx->src.mlli_nents;
0806             }
0807         }
0808 
0809         dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
0810         hw_desc_init(&desc[idx]);
0811         set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
0812                  NS_BIT);
0813         set_flow_mode(&desc[idx], flow_mode);
0814         break;
0815     }
0816     case CC_DMA_BUF_NULL:
0817     default:
0818         dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
0819     }
0820 
0821     *seq_size = (++idx);
0822 }
0823 
0824 static void cc_proc_cipher_desc(struct aead_request *areq,
0825                 unsigned int flow_mode,
0826                 struct cc_hw_desc desc[],
0827                 unsigned int *seq_size)
0828 {
0829     unsigned int idx = *seq_size;
0830     struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
0831     enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
0832     struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
0833     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0834     struct device *dev = drvdata_to_dev(ctx->drvdata);
0835 
0836     if (areq_ctx->cryptlen == 0)
0837         return; /*null processing*/
0838 
0839     switch (data_dma_type) {
0840     case CC_DMA_BUF_DLLI:
0841         dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
0842         hw_desc_init(&desc[idx]);
0843         set_din_type(&desc[idx], DMA_DLLI,
0844                  (sg_dma_address(areq_ctx->src_sgl) +
0845                   areq_ctx->src_offset), areq_ctx->cryptlen,
0846                   NS_BIT);
0847         set_dout_dlli(&desc[idx],
0848                   (sg_dma_address(areq_ctx->dst_sgl) +
0849                    areq_ctx->dst_offset),
0850                   areq_ctx->cryptlen, NS_BIT, 0);
0851         set_flow_mode(&desc[idx], flow_mode);
0852         break;
0853     case CC_DMA_BUF_MLLI:
0854         dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
0855         hw_desc_init(&desc[idx]);
0856         set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
0857                  areq_ctx->src.mlli_nents, NS_BIT);
0858         set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
0859                   areq_ctx->dst.mlli_nents, NS_BIT, 0);
0860         set_flow_mode(&desc[idx], flow_mode);
0861         break;
0862     case CC_DMA_BUF_NULL:
0863     default:
0864         dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
0865     }
0866 
0867     *seq_size = (++idx);
0868 }
0869 
0870 static void cc_proc_digest_desc(struct aead_request *req,
0871                 struct cc_hw_desc desc[],
0872                 unsigned int *seq_size)
0873 {
0874     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
0875     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0876     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
0877     unsigned int idx = *seq_size;
0878     unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
0879                 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
0880     int direct = req_ctx->gen_ctx.op_type;
0881 
0882     /* Get final ICV result */
0883     if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
0884         hw_desc_init(&desc[idx]);
0885         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0886         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0887         set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
0888                   NS_BIT, 1);
0889         set_queue_last_ind(ctx->drvdata, &desc[idx]);
0890         if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
0891             set_aes_not_hash_mode(&desc[idx]);
0892             set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
0893         } else {
0894             set_cipher_config0(&desc[idx],
0895                        HASH_DIGEST_RESULT_LITTLE_ENDIAN);
0896             set_cipher_mode(&desc[idx], hash_mode);
0897         }
0898     } else { /*Decrypt*/
0899         /* Get ICV out from hardware */
0900         hw_desc_init(&desc[idx]);
0901         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
0902         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
0903         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
0904                   ctx->authsize, NS_BIT, 1);
0905         set_queue_last_ind(ctx->drvdata, &desc[idx]);
0906         set_cipher_config0(&desc[idx],
0907                    HASH_DIGEST_RESULT_LITTLE_ENDIAN);
0908         set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
0909         if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
0910             set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
0911             set_aes_not_hash_mode(&desc[idx]);
0912         } else {
0913             set_cipher_mode(&desc[idx], hash_mode);
0914         }
0915     }
0916 
0917     *seq_size = (++idx);
0918 }
0919 
0920 static void cc_set_cipher_desc(struct aead_request *req,
0921                    struct cc_hw_desc desc[],
0922                    unsigned int *seq_size)
0923 {
0924     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
0925     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0926     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
0927     unsigned int hw_iv_size = req_ctx->hw_iv_size;
0928     unsigned int idx = *seq_size;
0929     int direct = req_ctx->gen_ctx.op_type;
0930 
0931     /* Setup cipher state */
0932     hw_desc_init(&desc[idx]);
0933     set_cipher_config0(&desc[idx], direct);
0934     set_flow_mode(&desc[idx], ctx->flow_mode);
0935     set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
0936              hw_iv_size, NS_BIT);
0937     if (ctx->cipher_mode == DRV_CIPHER_CTR)
0938         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
0939     else
0940         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
0941     set_cipher_mode(&desc[idx], ctx->cipher_mode);
0942     idx++;
0943 
0944     /* Setup enc. key */
0945     hw_desc_init(&desc[idx]);
0946     set_cipher_config0(&desc[idx], direct);
0947     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
0948     set_flow_mode(&desc[idx], ctx->flow_mode);
0949     if (ctx->flow_mode == S_DIN_to_AES) {
0950         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
0951                  ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
0952                   ctx->enc_keylen), NS_BIT);
0953         set_key_size_aes(&desc[idx], ctx->enc_keylen);
0954     } else {
0955         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
0956                  ctx->enc_keylen, NS_BIT);
0957         set_key_size_des(&desc[idx], ctx->enc_keylen);
0958     }
0959     set_cipher_mode(&desc[idx], ctx->cipher_mode);
0960     idx++;
0961 
0962     *seq_size = idx;
0963 }
0964 
0965 static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
0966                unsigned int *seq_size, unsigned int data_flow_mode)
0967 {
0968     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
0969     int direct = req_ctx->gen_ctx.op_type;
0970     unsigned int idx = *seq_size;
0971 
0972     if (req_ctx->cryptlen == 0)
0973         return; /*null processing*/
0974 
0975     cc_set_cipher_desc(req, desc, &idx);
0976     cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
0977     if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
0978         /* We must wait for DMA to write all cipher */
0979         hw_desc_init(&desc[idx]);
0980         set_din_no_dma(&desc[idx], 0, 0xfffff0);
0981         set_dout_no_dma(&desc[idx], 0, 0, 1);
0982         idx++;
0983     }
0984 
0985     *seq_size = idx;
0986 }
0987 
0988 static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
0989                  unsigned int *seq_size)
0990 {
0991     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
0992     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
0993     unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
0994                 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
0995     unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
0996                 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
0997     unsigned int idx = *seq_size;
0998 
0999     /* Loading hash ipad xor key state */
1000     hw_desc_init(&desc[idx]);
1001     set_cipher_mode(&desc[idx], hash_mode);
1002     set_din_type(&desc[idx], DMA_DLLI,
1003              ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1004              NS_BIT);
1005     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006     set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1007     idx++;
1008 
1009     /* Load init. digest len (64 bytes) */
1010     hw_desc_init(&desc[idx]);
1011     set_cipher_mode(&desc[idx], hash_mode);
1012     set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1013              ctx->hash_len);
1014     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1015     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1016     idx++;
1017 
1018     *seq_size = idx;
1019 }
1020 
1021 static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1022                  unsigned int *seq_size)
1023 {
1024     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1025     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1026     unsigned int idx = *seq_size;
1027 
1028     /* Loading MAC state */
1029     hw_desc_init(&desc[idx]);
1030     set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1031     set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1032     set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1033     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1034     set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1035     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1036     set_aes_not_hash_mode(&desc[idx]);
1037     idx++;
1038 
1039     /* Setup XCBC MAC K1 */
1040     hw_desc_init(&desc[idx]);
1041     set_din_type(&desc[idx], DMA_DLLI,
1042              ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1043              AES_KEYSIZE_128, NS_BIT);
1044     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1045     set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1046     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1047     set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1048     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1049     set_aes_not_hash_mode(&desc[idx]);
1050     idx++;
1051 
1052     /* Setup XCBC MAC K2 */
1053     hw_desc_init(&desc[idx]);
1054     set_din_type(&desc[idx], DMA_DLLI,
1055              (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1056               AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1057     set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1058     set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1059     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1060     set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1061     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1062     set_aes_not_hash_mode(&desc[idx]);
1063     idx++;
1064 
1065     /* Setup XCBC MAC K3 */
1066     hw_desc_init(&desc[idx]);
1067     set_din_type(&desc[idx], DMA_DLLI,
1068              (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1069               2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1070     set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1071     set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1072     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1073     set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1074     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1075     set_aes_not_hash_mode(&desc[idx]);
1076     idx++;
1077 
1078     *seq_size = idx;
1079 }
1080 
1081 static void cc_proc_header_desc(struct aead_request *req,
1082                 struct cc_hw_desc desc[],
1083                 unsigned int *seq_size)
1084 {
1085     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1086     unsigned int idx = *seq_size;
1087 
1088     /* Hash associated data */
1089     if (areq_ctx->assoclen > 0)
1090         cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1091 
1092     /* Hash IV */
1093     *seq_size = idx;
1094 }
1095 
1096 static void cc_proc_scheme_desc(struct aead_request *req,
1097                 struct cc_hw_desc desc[],
1098                 unsigned int *seq_size)
1099 {
1100     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1101     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1102     struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1103     unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1104                 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1105     unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1106                 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1107     unsigned int idx = *seq_size;
1108 
1109     hw_desc_init(&desc[idx]);
1110     set_cipher_mode(&desc[idx], hash_mode);
1111     set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1112               ctx->hash_len);
1113     set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1114     set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1115     set_cipher_do(&desc[idx], DO_PAD);
1116     idx++;
1117 
1118     /* Get final ICV result */
1119     hw_desc_init(&desc[idx]);
1120     set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1121               digest_size);
1122     set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1123     set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1124     set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1125     set_cipher_mode(&desc[idx], hash_mode);
1126     idx++;
1127 
1128     /* Loading hash opad xor key state */
1129     hw_desc_init(&desc[idx]);
1130     set_cipher_mode(&desc[idx], hash_mode);
1131     set_din_type(&desc[idx], DMA_DLLI,
1132              (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1133              digest_size, NS_BIT);
1134     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1135     set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1136     idx++;
1137 
1138     /* Load init. digest len (64 bytes) */
1139     hw_desc_init(&desc[idx]);
1140     set_cipher_mode(&desc[idx], hash_mode);
1141     set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1142              ctx->hash_len);
1143     set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1144     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1145     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1146     idx++;
1147 
1148     /* Perform HASH update */
1149     hw_desc_init(&desc[idx]);
1150     set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1151              digest_size);
1152     set_flow_mode(&desc[idx], DIN_HASH);
1153     idx++;
1154 
1155     *seq_size = idx;
1156 }
1157 
1158 static void cc_mlli_to_sram(struct aead_request *req,
1159                 struct cc_hw_desc desc[], unsigned int *seq_size)
1160 {
1161     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1162     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1163     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1164     struct device *dev = drvdata_to_dev(ctx->drvdata);
1165 
1166     if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1167         req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1168         !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1169         dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1170             ctx->drvdata->mlli_sram_addr,
1171             req_ctx->mlli_params.mlli_len);
1172         /* Copy MLLI table host-to-sram */
1173         hw_desc_init(&desc[*seq_size]);
1174         set_din_type(&desc[*seq_size], DMA_DLLI,
1175                  req_ctx->mlli_params.mlli_dma_addr,
1176                  req_ctx->mlli_params.mlli_len, NS_BIT);
1177         set_dout_sram(&desc[*seq_size],
1178                   ctx->drvdata->mlli_sram_addr,
1179                   req_ctx->mlli_params.mlli_len);
1180         set_flow_mode(&desc[*seq_size], BYPASS);
1181         (*seq_size)++;
1182     }
1183 }
1184 
1185 static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1186                       enum cc_flow_mode setup_flow_mode,
1187                       bool is_single_pass)
1188 {
1189     enum cc_flow_mode data_flow_mode;
1190 
1191     if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1192         if (setup_flow_mode == S_DIN_to_AES)
1193             data_flow_mode = is_single_pass ?
1194                 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1195         else
1196             data_flow_mode = is_single_pass ?
1197                 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1198     } else { /* Decrypt */
1199         if (setup_flow_mode == S_DIN_to_AES)
1200             data_flow_mode = is_single_pass ?
1201                 AES_and_HASH : DIN_AES_DOUT;
1202         else
1203             data_flow_mode = is_single_pass ?
1204                 DES_and_HASH : DIN_DES_DOUT;
1205     }
1206 
1207     return data_flow_mode;
1208 }
1209 
1210 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1211                 unsigned int *seq_size)
1212 {
1213     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1215     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1216     int direct = req_ctx->gen_ctx.op_type;
1217     unsigned int data_flow_mode =
1218         cc_get_data_flow(direct, ctx->flow_mode,
1219                  req_ctx->is_single_pass);
1220 
1221     if (req_ctx->is_single_pass) {
1222         /*
1223          * Single-pass flow
1224          */
1225         cc_set_hmac_desc(req, desc, seq_size);
1226         cc_set_cipher_desc(req, desc, seq_size);
1227         cc_proc_header_desc(req, desc, seq_size);
1228         cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1229         cc_proc_scheme_desc(req, desc, seq_size);
1230         cc_proc_digest_desc(req, desc, seq_size);
1231         return;
1232     }
1233 
1234     /*
1235      * Double-pass flow
1236      * Fallback for unsupported single-pass modes,
1237      * i.e. using assoc. data of non-word-multiple
1238      */
1239     if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1240         /* encrypt first.. */
1241         cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1242         /* authenc after..*/
1243         cc_set_hmac_desc(req, desc, seq_size);
1244         cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1245         cc_proc_scheme_desc(req, desc, seq_size);
1246         cc_proc_digest_desc(req, desc, seq_size);
1247 
1248     } else { /*DECRYPT*/
1249         /* authenc first..*/
1250         cc_set_hmac_desc(req, desc, seq_size);
1251         cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1252         cc_proc_scheme_desc(req, desc, seq_size);
1253         /* decrypt after.. */
1254         cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1255         /* read the digest result with setting the completion bit
1256          * must be after the cipher operation
1257          */
1258         cc_proc_digest_desc(req, desc, seq_size);
1259     }
1260 }
1261 
1262 static void
1263 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1264         unsigned int *seq_size)
1265 {
1266     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1267     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1268     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1269     int direct = req_ctx->gen_ctx.op_type;
1270     unsigned int data_flow_mode =
1271         cc_get_data_flow(direct, ctx->flow_mode,
1272                  req_ctx->is_single_pass);
1273 
1274     if (req_ctx->is_single_pass) {
1275         /*
1276          * Single-pass flow
1277          */
1278         cc_set_xcbc_desc(req, desc, seq_size);
1279         cc_set_cipher_desc(req, desc, seq_size);
1280         cc_proc_header_desc(req, desc, seq_size);
1281         cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1282         cc_proc_digest_desc(req, desc, seq_size);
1283         return;
1284     }
1285 
1286     /*
1287      * Double-pass flow
1288      * Fallback for unsupported single-pass modes,
1289      * i.e. using assoc. data of non-word-multiple
1290      */
1291     if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292         /* encrypt first.. */
1293         cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1294         /* authenc after.. */
1295         cc_set_xcbc_desc(req, desc, seq_size);
1296         cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1297         cc_proc_digest_desc(req, desc, seq_size);
1298     } else { /*DECRYPT*/
1299         /* authenc first.. */
1300         cc_set_xcbc_desc(req, desc, seq_size);
1301         cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1302         /* decrypt after..*/
1303         cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1304         /* read the digest result with setting the completion bit
1305          * must be after the cipher operation
1306          */
1307         cc_proc_digest_desc(req, desc, seq_size);
1308     }
1309 }
1310 
1311 static int validate_data_size(struct cc_aead_ctx *ctx,
1312                   enum drv_crypto_direction direct,
1313                   struct aead_request *req)
1314 {
1315     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1316     struct device *dev = drvdata_to_dev(ctx->drvdata);
1317     unsigned int assoclen = areq_ctx->assoclen;
1318     unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1319             (req->cryptlen - ctx->authsize) : req->cryptlen;
1320 
1321     if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1322         req->cryptlen < ctx->authsize)
1323         goto data_size_err;
1324 
1325     areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1326 
1327     switch (ctx->flow_mode) {
1328     case S_DIN_to_AES:
1329         if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1330             !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1331             goto data_size_err;
1332         if (ctx->cipher_mode == DRV_CIPHER_CCM)
1333             break;
1334         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1335             if (areq_ctx->plaintext_authenticate_only)
1336                 areq_ctx->is_single_pass = false;
1337             break;
1338         }
1339 
1340         if (!IS_ALIGNED(assoclen, sizeof(u32)))
1341             areq_ctx->is_single_pass = false;
1342 
1343         if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1344             !IS_ALIGNED(cipherlen, sizeof(u32)))
1345             areq_ctx->is_single_pass = false;
1346 
1347         break;
1348     case S_DIN_to_DES:
1349         if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1350             goto data_size_err;
1351         if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1352             areq_ctx->is_single_pass = false;
1353         break;
1354     default:
1355         dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1356         goto data_size_err;
1357     }
1358 
1359     return 0;
1360 
1361 data_size_err:
1362     return -EINVAL;
1363 }
1364 
1365 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1366 {
1367     unsigned int len = 0;
1368 
1369     if (header_size == 0)
1370         return 0;
1371 
1372     if (header_size < ((1UL << 16) - (1UL << 8))) {
1373         len = 2;
1374 
1375         pa0_buff[0] = (header_size >> 8) & 0xFF;
1376         pa0_buff[1] = header_size & 0xFF;
1377     } else {
1378         len = 6;
1379 
1380         pa0_buff[0] = 0xFF;
1381         pa0_buff[1] = 0xFE;
1382         pa0_buff[2] = (header_size >> 24) & 0xFF;
1383         pa0_buff[3] = (header_size >> 16) & 0xFF;
1384         pa0_buff[4] = (header_size >> 8) & 0xFF;
1385         pa0_buff[5] = header_size & 0xFF;
1386     }
1387 
1388     return len;
1389 }
1390 
1391 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1392 {
1393     __be32 data;
1394 
1395     memset(block, 0, csize);
1396     block += csize;
1397 
1398     if (csize >= 4)
1399         csize = 4;
1400     else if (msglen > (1 << (8 * csize)))
1401         return -EOVERFLOW;
1402 
1403     data = cpu_to_be32(msglen);
1404     memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1405 
1406     return 0;
1407 }
1408 
1409 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1410           unsigned int *seq_size)
1411 {
1412     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1413     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1414     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1415     unsigned int idx = *seq_size;
1416     unsigned int cipher_flow_mode;
1417     dma_addr_t mac_result;
1418 
1419     if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1420         cipher_flow_mode = AES_to_HASH_and_DOUT;
1421         mac_result = req_ctx->mac_buf_dma_addr;
1422     } else { /* Encrypt */
1423         cipher_flow_mode = AES_and_HASH;
1424         mac_result = req_ctx->icv_dma_addr;
1425     }
1426 
1427     /* load key */
1428     hw_desc_init(&desc[idx]);
1429     set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1430     set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1431              ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1432               ctx->enc_keylen), NS_BIT);
1433     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1434     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1435     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1436     set_flow_mode(&desc[idx], S_DIN_to_AES);
1437     idx++;
1438 
1439     /* load ctr state */
1440     hw_desc_init(&desc[idx]);
1441     set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1442     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1443     set_din_type(&desc[idx], DMA_DLLI,
1444              req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1445     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446     set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1447     set_flow_mode(&desc[idx], S_DIN_to_AES);
1448     idx++;
1449 
1450     /* load MAC key */
1451     hw_desc_init(&desc[idx]);
1452     set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1453     set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1454              ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1455               ctx->enc_keylen), NS_BIT);
1456     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1457     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1458     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1460     set_aes_not_hash_mode(&desc[idx]);
1461     idx++;
1462 
1463     /* load MAC state */
1464     hw_desc_init(&desc[idx]);
1465     set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1466     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467     set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1468              AES_BLOCK_SIZE, NS_BIT);
1469     set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470     set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1471     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1472     set_aes_not_hash_mode(&desc[idx]);
1473     idx++;
1474 
1475     /* process assoc data */
1476     if (req_ctx->assoclen > 0) {
1477         cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1478     } else {
1479         hw_desc_init(&desc[idx]);
1480         set_din_type(&desc[idx], DMA_DLLI,
1481                  sg_dma_address(&req_ctx->ccm_adata_sg),
1482                  AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1483         set_flow_mode(&desc[idx], DIN_HASH);
1484         idx++;
1485     }
1486 
1487     /* process the cipher */
1488     if (req_ctx->cryptlen)
1489         cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1490 
1491     /* Read temporal MAC */
1492     hw_desc_init(&desc[idx]);
1493     set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1494     set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1495               NS_BIT, 0);
1496     set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1497     set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1498     set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1499     set_aes_not_hash_mode(&desc[idx]);
1500     idx++;
1501 
1502     /* load AES-CTR state (for last MAC calculation)*/
1503     hw_desc_init(&desc[idx]);
1504     set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1505     set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1506     set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1507              AES_BLOCK_SIZE, NS_BIT);
1508     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1509     set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1510     set_flow_mode(&desc[idx], S_DIN_to_AES);
1511     idx++;
1512 
1513     hw_desc_init(&desc[idx]);
1514     set_din_no_dma(&desc[idx], 0, 0xfffff0);
1515     set_dout_no_dma(&desc[idx], 0, 0, 1);
1516     idx++;
1517 
1518     /* encrypt the "T" value and store MAC in mac_state */
1519     hw_desc_init(&desc[idx]);
1520     set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1521              ctx->authsize, NS_BIT);
1522     set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1523     set_queue_last_ind(ctx->drvdata, &desc[idx]);
1524     set_flow_mode(&desc[idx], DIN_AES_DOUT);
1525     idx++;
1526 
1527     *seq_size = idx;
1528     return 0;
1529 }
1530 
1531 static int config_ccm_adata(struct aead_request *req)
1532 {
1533     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1534     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1535     struct device *dev = drvdata_to_dev(ctx->drvdata);
1536     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1537     //unsigned int size_of_a = 0, rem_a_size = 0;
1538     unsigned int lp = req->iv[0];
1539     /* Note: The code assume that req->iv[0] already contains the value
1540      * of L' of RFC3610
1541      */
1542     unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
1543     unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
1544     u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1545     u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1546     u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1547     unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1548                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1549                 req->cryptlen :
1550                 (req->cryptlen - ctx->authsize);
1551     int rc;
1552 
1553     memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1554     memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1555 
1556     /* taken from crypto/ccm.c */
1557     /* 2 <= L <= 8, so 1 <= L' <= 7. */
1558     if (l < 2 || l > 8) {
1559         dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
1560         return -EINVAL;
1561     }
1562     memcpy(b0, req->iv, AES_BLOCK_SIZE);
1563 
1564     /* format control info per RFC 3610 and
1565      * NIST Special Publication 800-38C
1566      */
1567     *b0 |= (8 * ((m - 2) / 2));
1568     if (req_ctx->assoclen > 0)
1569         *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1570 
1571     rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
1572     if (rc) {
1573         dev_err(dev, "message len overflow detected");
1574         return rc;
1575     }
1576      /* END of "taken from crypto/ccm.c" */
1577 
1578     /* l(a) - size of associated data. */
1579     req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1580 
1581     memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1582     req->iv[15] = 1;
1583 
1584     memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1585     ctr_count_0[15] = 0;
1586 
1587     return 0;
1588 }
1589 
1590 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1591 {
1592     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1595 
1596     /* L' */
1597     memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1598     /* For RFC 4309, always use 4 bytes for message length
1599      * (at most 2^32-1 bytes).
1600      */
1601     areq_ctx->ctr_iv[0] = 3;
1602 
1603     /* In RFC 4309 there is an 11-bytes nonce+IV part,
1604      * that we build here.
1605      */
1606     memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1607            CCM_BLOCK_NONCE_SIZE);
1608     memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1609            CCM_BLOCK_IV_SIZE);
1610     req->iv = areq_ctx->ctr_iv;
1611 }
1612 
1613 static void cc_set_ghash_desc(struct aead_request *req,
1614                   struct cc_hw_desc desc[], unsigned int *seq_size)
1615 {
1616     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1617     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1618     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1619     unsigned int idx = *seq_size;
1620 
1621     /* load key to AES*/
1622     hw_desc_init(&desc[idx]);
1623     set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1624     set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1625     set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1626              ctx->enc_keylen, NS_BIT);
1627     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1628     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1629     set_flow_mode(&desc[idx], S_DIN_to_AES);
1630     idx++;
1631 
1632     /* process one zero block to generate hkey */
1633     hw_desc_init(&desc[idx]);
1634     set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1635     set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1636               NS_BIT, 0);
1637     set_flow_mode(&desc[idx], DIN_AES_DOUT);
1638     idx++;
1639 
1640     /* Memory Barrier */
1641     hw_desc_init(&desc[idx]);
1642     set_din_no_dma(&desc[idx], 0, 0xfffff0);
1643     set_dout_no_dma(&desc[idx], 0, 0, 1);
1644     idx++;
1645 
1646     /* Load GHASH subkey */
1647     hw_desc_init(&desc[idx]);
1648     set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1649              AES_BLOCK_SIZE, NS_BIT);
1650     set_dout_no_dma(&desc[idx], 0, 0, 1);
1651     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1652     set_aes_not_hash_mode(&desc[idx]);
1653     set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1654     set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1655     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1656     idx++;
1657 
1658     /* Configure Hash Engine to work with GHASH.
1659      * Since it was not possible to extend HASH submodes to add GHASH,
1660      * The following command is necessary in order to
1661      * select GHASH (according to HW designers)
1662      */
1663     hw_desc_init(&desc[idx]);
1664     set_din_no_dma(&desc[idx], 0, 0xfffff0);
1665     set_dout_no_dma(&desc[idx], 0, 0, 1);
1666     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1667     set_aes_not_hash_mode(&desc[idx]);
1668     set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1669     set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1670     set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1671     set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1672     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1673     idx++;
1674 
1675     /* Load GHASH initial STATE (which is 0). (for any hash there is an
1676      * initial state)
1677      */
1678     hw_desc_init(&desc[idx]);
1679     set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1680     set_dout_no_dma(&desc[idx], 0, 0, 1);
1681     set_flow_mode(&desc[idx], S_DIN_to_HASH);
1682     set_aes_not_hash_mode(&desc[idx]);
1683     set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1684     set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1685     set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1686     idx++;
1687 
1688     *seq_size = idx;
1689 }
1690 
1691 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1692                  unsigned int *seq_size)
1693 {
1694     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1695     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1696     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1697     unsigned int idx = *seq_size;
1698 
1699     /* load key to AES*/
1700     hw_desc_init(&desc[idx]);
1701     set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1702     set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1703     set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1704              ctx->enc_keylen, NS_BIT);
1705     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1706     set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1707     set_flow_mode(&desc[idx], S_DIN_to_AES);
1708     idx++;
1709 
1710     if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1711         /* load AES/CTR initial CTR value inc by 2*/
1712         hw_desc_init(&desc[idx]);
1713         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1715         set_din_type(&desc[idx], DMA_DLLI,
1716                  req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1717                  NS_BIT);
1718         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1719         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1720         set_flow_mode(&desc[idx], S_DIN_to_AES);
1721         idx++;
1722     }
1723 
1724     *seq_size = idx;
1725 }
1726 
1727 static void cc_proc_gcm_result(struct aead_request *req,
1728                    struct cc_hw_desc desc[],
1729                    unsigned int *seq_size)
1730 {
1731     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1732     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1733     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1734     dma_addr_t mac_result;
1735     unsigned int idx = *seq_size;
1736 
1737     if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1738         mac_result = req_ctx->mac_buf_dma_addr;
1739     } else { /* Encrypt */
1740         mac_result = req_ctx->icv_dma_addr;
1741     }
1742 
1743     /* process(ghash) gcm_block_len */
1744     hw_desc_init(&desc[idx]);
1745     set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1746              AES_BLOCK_SIZE, NS_BIT);
1747     set_flow_mode(&desc[idx], DIN_HASH);
1748     idx++;
1749 
1750     /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1751     hw_desc_init(&desc[idx]);
1752     set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1753     set_din_no_dma(&desc[idx], 0, 0xfffff0);
1754     set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1755               NS_BIT, 0);
1756     set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1757     set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1758     set_aes_not_hash_mode(&desc[idx]);
1759 
1760     idx++;
1761 
1762     /* load AES/CTR initial CTR value inc by 1*/
1763     hw_desc_init(&desc[idx]);
1764     set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1765     set_key_size_aes(&desc[idx], ctx->enc_keylen);
1766     set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1767              AES_BLOCK_SIZE, NS_BIT);
1768     set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1769     set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1770     set_flow_mode(&desc[idx], S_DIN_to_AES);
1771     idx++;
1772 
1773     /* Memory Barrier */
1774     hw_desc_init(&desc[idx]);
1775     set_din_no_dma(&desc[idx], 0, 0xfffff0);
1776     set_dout_no_dma(&desc[idx], 0, 0, 1);
1777     idx++;
1778 
1779     /* process GCTR on stored GHASH and store MAC in mac_state*/
1780     hw_desc_init(&desc[idx]);
1781     set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1782     set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1783              AES_BLOCK_SIZE, NS_BIT);
1784     set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1785     set_queue_last_ind(ctx->drvdata, &desc[idx]);
1786     set_flow_mode(&desc[idx], DIN_AES_DOUT);
1787     idx++;
1788 
1789     *seq_size = idx;
1790 }
1791 
1792 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1793           unsigned int *seq_size)
1794 {
1795     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1796     unsigned int cipher_flow_mode;
1797 
1798     //in RFC4543 no data to encrypt. just copy data from src to dest.
1799     if (req_ctx->plaintext_authenticate_only) {
1800         cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1801         cc_set_ghash_desc(req, desc, seq_size);
1802         /* process(ghash) assoc data */
1803         cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1804         cc_set_gctr_desc(req, desc, seq_size);
1805         cc_proc_gcm_result(req, desc, seq_size);
1806         return 0;
1807     }
1808 
1809     if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810         cipher_flow_mode = AES_and_HASH;
1811     } else { /* Encrypt */
1812         cipher_flow_mode = AES_to_HASH_and_DOUT;
1813     }
1814 
1815     // for gcm and rfc4106.
1816     cc_set_ghash_desc(req, desc, seq_size);
1817     /* process(ghash) assoc data */
1818     if (req_ctx->assoclen > 0)
1819         cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1820     cc_set_gctr_desc(req, desc, seq_size);
1821     /* process(gctr+ghash) */
1822     if (req_ctx->cryptlen)
1823         cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1824     cc_proc_gcm_result(req, desc, seq_size);
1825 
1826     return 0;
1827 }
1828 
1829 static int config_gcm_context(struct aead_request *req)
1830 {
1831     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1832     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1833     struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1834     struct device *dev = drvdata_to_dev(ctx->drvdata);
1835 
1836     unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1837                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1838                 req->cryptlen :
1839                 (req->cryptlen - ctx->authsize);
1840     __be32 counter = cpu_to_be32(2);
1841 
1842     dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1843         __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1844 
1845     memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1846 
1847     memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1848 
1849     memcpy(req->iv + 12, &counter, 4);
1850     memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1851 
1852     counter = cpu_to_be32(1);
1853     memcpy(req->iv + 12, &counter, 4);
1854     memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1855 
1856     if (!req_ctx->plaintext_authenticate_only) {
1857         __be64 temp64;
1858 
1859         temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1860         memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1861         temp64 = cpu_to_be64(cryptlen * 8);
1862         memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1863     } else {
1864         /* rfc4543=>  all data(AAD,IV,Plain) are considered additional
1865          * data that is nothing is encrypted.
1866          */
1867         __be64 temp64;
1868 
1869         temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
1870         memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871         temp64 = 0;
1872         memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1873     }
1874 
1875     return 0;
1876 }
1877 
1878 static void cc_proc_rfc4_gcm(struct aead_request *req)
1879 {
1880     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1883 
1884     memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885            ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886     memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887            GCM_BLOCK_RFC4_IV_SIZE);
1888     req->iv = areq_ctx->ctr_iv;
1889 }
1890 
1891 static int cc_proc_aead(struct aead_request *req,
1892             enum drv_crypto_direction direct)
1893 {
1894     int rc = 0;
1895     int seq_len = 0;
1896     struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1897     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1898     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1899     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1900     struct device *dev = drvdata_to_dev(ctx->drvdata);
1901     struct cc_crypto_req cc_req = {};
1902 
1903     dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
1904         ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1905         ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1906         sg_virt(req->dst), req->dst->offset, req->cryptlen);
1907 
1908     /* STAT_PHASE_0: Init and sanity checks */
1909 
1910     /* Check data length according to mode */
1911     if (validate_data_size(ctx, direct, req)) {
1912         dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1913             req->cryptlen, areq_ctx->assoclen);
1914         return -EINVAL;
1915     }
1916 
1917     /* Setup request structure */
1918     cc_req.user_cb = cc_aead_complete;
1919     cc_req.user_arg = req;
1920 
1921     /* Setup request context */
1922     areq_ctx->gen_ctx.op_type = direct;
1923     areq_ctx->req_authsize = ctx->authsize;
1924     areq_ctx->cipher_mode = ctx->cipher_mode;
1925 
1926     /* STAT_PHASE_1: Map buffers */
1927 
1928     if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1929         /* Build CTR IV - Copy nonce from last 4 bytes in
1930          * CTR key to first 4 bytes in CTR IV
1931          */
1932         memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1933                CTR_RFC3686_NONCE_SIZE);
1934         memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1935                CTR_RFC3686_IV_SIZE);
1936         /* Initialize counter portion of counter block */
1937         *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1938                 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1939 
1940         /* Replace with counter iv */
1941         req->iv = areq_ctx->ctr_iv;
1942         areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1943     } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1944            (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1945         areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1946         if (areq_ctx->ctr_iv != req->iv) {
1947             memcpy(areq_ctx->ctr_iv, req->iv,
1948                    crypto_aead_ivsize(tfm));
1949             req->iv = areq_ctx->ctr_iv;
1950         }
1951     }  else {
1952         areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1953     }
1954 
1955     if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1956         rc = config_ccm_adata(req);
1957         if (rc) {
1958             dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1959                 rc);
1960             goto exit;
1961         }
1962     } else {
1963         areq_ctx->ccm_hdr_size = ccm_header_size_null;
1964     }
1965 
1966     if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1967         rc = config_gcm_context(req);
1968         if (rc) {
1969             dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1970                 rc);
1971             goto exit;
1972         }
1973     }
1974 
1975     rc = cc_map_aead_request(ctx->drvdata, req);
1976     if (rc) {
1977         dev_err(dev, "map_request() failed\n");
1978         goto exit;
1979     }
1980 
1981     /* STAT_PHASE_2: Create sequence */
1982 
1983     /* Load MLLI tables to SRAM if necessary */
1984     cc_mlli_to_sram(req, desc, &seq_len);
1985 
1986     switch (ctx->auth_mode) {
1987     case DRV_HASH_SHA1:
1988     case DRV_HASH_SHA256:
1989         cc_hmac_authenc(req, desc, &seq_len);
1990         break;
1991     case DRV_HASH_XCBC_MAC:
1992         cc_xcbc_authenc(req, desc, &seq_len);
1993         break;
1994     case DRV_HASH_NULL:
1995         if (ctx->cipher_mode == DRV_CIPHER_CCM)
1996             cc_ccm(req, desc, &seq_len);
1997         if (ctx->cipher_mode == DRV_CIPHER_GCTR)
1998             cc_gcm(req, desc, &seq_len);
1999         break;
2000     default:
2001         dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2002         cc_unmap_aead_request(dev, req);
2003         rc = -ENOTSUPP;
2004         goto exit;
2005     }
2006 
2007     /* STAT_PHASE_3: Lock HW and push sequence */
2008 
2009     rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2010 
2011     if (rc != -EINPROGRESS && rc != -EBUSY) {
2012         dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2013         cc_unmap_aead_request(dev, req);
2014     }
2015 
2016 exit:
2017     return rc;
2018 }
2019 
2020 static int cc_aead_encrypt(struct aead_request *req)
2021 {
2022     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2023     int rc;
2024 
2025     memset(areq_ctx, 0, sizeof(*areq_ctx));
2026 
2027     /* No generated IV required */
2028     areq_ctx->backup_iv = req->iv;
2029     areq_ctx->assoclen = req->assoclen;
2030 
2031     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2032     if (rc != -EINPROGRESS && rc != -EBUSY)
2033         req->iv = areq_ctx->backup_iv;
2034 
2035     return rc;
2036 }
2037 
2038 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2039 {
2040     /* Very similar to cc_aead_encrypt() above. */
2041 
2042     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2043     int rc;
2044 
2045     rc = crypto_ipsec_check_assoclen(req->assoclen);
2046     if (rc)
2047         goto out;
2048 
2049     memset(areq_ctx, 0, sizeof(*areq_ctx));
2050 
2051     /* No generated IV required */
2052     areq_ctx->backup_iv = req->iv;
2053     areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2054 
2055     cc_proc_rfc4309_ccm(req);
2056 
2057     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2058     if (rc != -EINPROGRESS && rc != -EBUSY)
2059         req->iv = areq_ctx->backup_iv;
2060 out:
2061     return rc;
2062 }
2063 
2064 static int cc_aead_decrypt(struct aead_request *req)
2065 {
2066     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2067     int rc;
2068 
2069     memset(areq_ctx, 0, sizeof(*areq_ctx));
2070 
2071     /* No generated IV required */
2072     areq_ctx->backup_iv = req->iv;
2073     areq_ctx->assoclen = req->assoclen;
2074 
2075     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2076     if (rc != -EINPROGRESS && rc != -EBUSY)
2077         req->iv = areq_ctx->backup_iv;
2078 
2079     return rc;
2080 }
2081 
2082 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2083 {
2084     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2085     int rc;
2086 
2087     rc = crypto_ipsec_check_assoclen(req->assoclen);
2088     if (rc)
2089         goto out;
2090 
2091     memset(areq_ctx, 0, sizeof(*areq_ctx));
2092 
2093     /* No generated IV required */
2094     areq_ctx->backup_iv = req->iv;
2095     areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2096 
2097     cc_proc_rfc4309_ccm(req);
2098 
2099     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2100     if (rc != -EINPROGRESS && rc != -EBUSY)
2101         req->iv = areq_ctx->backup_iv;
2102 
2103 out:
2104     return rc;
2105 }
2106 
2107 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2108                  unsigned int keylen)
2109 {
2110     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2111     struct device *dev = drvdata_to_dev(ctx->drvdata);
2112 
2113     dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2114 
2115     if (keylen < 4)
2116         return -EINVAL;
2117 
2118     keylen -= 4;
2119     memcpy(ctx->ctr_nonce, key + keylen, 4);
2120 
2121     return cc_aead_setkey(tfm, key, keylen);
2122 }
2123 
2124 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2125                  unsigned int keylen)
2126 {
2127     struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2128     struct device *dev = drvdata_to_dev(ctx->drvdata);
2129 
2130     dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2131 
2132     if (keylen < 4)
2133         return -EINVAL;
2134 
2135     keylen -= 4;
2136     memcpy(ctx->ctr_nonce, key + keylen, 4);
2137 
2138     return cc_aead_setkey(tfm, key, keylen);
2139 }
2140 
2141 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2142                   unsigned int authsize)
2143 {
2144     switch (authsize) {
2145     case 4:
2146     case 8:
2147     case 12:
2148     case 13:
2149     case 14:
2150     case 15:
2151     case 16:
2152         break;
2153     default:
2154         return -EINVAL;
2155     }
2156 
2157     return cc_aead_setauthsize(authenc, authsize);
2158 }
2159 
2160 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2161                       unsigned int authsize)
2162 {
2163     struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2164     struct device *dev = drvdata_to_dev(ctx->drvdata);
2165 
2166     dev_dbg(dev, "authsize %d\n", authsize);
2167 
2168     switch (authsize) {
2169     case 8:
2170     case 12:
2171     case 16:
2172         break;
2173     default:
2174         return -EINVAL;
2175     }
2176 
2177     return cc_aead_setauthsize(authenc, authsize);
2178 }
2179 
2180 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2181                       unsigned int authsize)
2182 {
2183     struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2184     struct device *dev = drvdata_to_dev(ctx->drvdata);
2185 
2186     dev_dbg(dev, "authsize %d\n", authsize);
2187 
2188     if (authsize != 16)
2189         return -EINVAL;
2190 
2191     return cc_aead_setauthsize(authenc, authsize);
2192 }
2193 
2194 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2195 {
2196     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2197     int rc;
2198 
2199     rc = crypto_ipsec_check_assoclen(req->assoclen);
2200     if (rc)
2201         goto out;
2202 
2203     memset(areq_ctx, 0, sizeof(*areq_ctx));
2204 
2205     /* No generated IV required */
2206     areq_ctx->backup_iv = req->iv;
2207     areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2208 
2209     cc_proc_rfc4_gcm(req);
2210 
2211     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2212     if (rc != -EINPROGRESS && rc != -EBUSY)
2213         req->iv = areq_ctx->backup_iv;
2214 out:
2215     return rc;
2216 }
2217 
2218 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2219 {
2220     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2221     int rc;
2222 
2223     rc = crypto_ipsec_check_assoclen(req->assoclen);
2224     if (rc)
2225         goto out;
2226 
2227     memset(areq_ctx, 0, sizeof(*areq_ctx));
2228 
2229     //plaintext is not encryped with rfc4543
2230     areq_ctx->plaintext_authenticate_only = true;
2231 
2232     /* No generated IV required */
2233     areq_ctx->backup_iv = req->iv;
2234     areq_ctx->assoclen = req->assoclen;
2235 
2236     cc_proc_rfc4_gcm(req);
2237 
2238     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2239     if (rc != -EINPROGRESS && rc != -EBUSY)
2240         req->iv = areq_ctx->backup_iv;
2241 out:
2242     return rc;
2243 }
2244 
2245 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2246 {
2247     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2248     int rc;
2249 
2250     rc = crypto_ipsec_check_assoclen(req->assoclen);
2251     if (rc)
2252         goto out;
2253 
2254     memset(areq_ctx, 0, sizeof(*areq_ctx));
2255 
2256     /* No generated IV required */
2257     areq_ctx->backup_iv = req->iv;
2258     areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2259 
2260     cc_proc_rfc4_gcm(req);
2261 
2262     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2263     if (rc != -EINPROGRESS && rc != -EBUSY)
2264         req->iv = areq_ctx->backup_iv;
2265 out:
2266     return rc;
2267 }
2268 
2269 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2270 {
2271     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2272     int rc;
2273 
2274     rc = crypto_ipsec_check_assoclen(req->assoclen);
2275     if (rc)
2276         goto out;
2277 
2278     memset(areq_ctx, 0, sizeof(*areq_ctx));
2279 
2280     //plaintext is not decryped with rfc4543
2281     areq_ctx->plaintext_authenticate_only = true;
2282 
2283     /* No generated IV required */
2284     areq_ctx->backup_iv = req->iv;
2285     areq_ctx->assoclen = req->assoclen;
2286 
2287     cc_proc_rfc4_gcm(req);
2288 
2289     rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2290     if (rc != -EINPROGRESS && rc != -EBUSY)
2291         req->iv = areq_ctx->backup_iv;
2292 out:
2293     return rc;
2294 }
2295 
2296 /* aead alg */
2297 static struct cc_alg_template aead_algs[] = {
2298     {
2299         .name = "authenc(hmac(sha1),cbc(aes))",
2300         .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2301         .blocksize = AES_BLOCK_SIZE,
2302         .template_aead = {
2303             .setkey = cc_aead_setkey,
2304             .setauthsize = cc_aead_setauthsize,
2305             .encrypt = cc_aead_encrypt,
2306             .decrypt = cc_aead_decrypt,
2307             .init = cc_aead_init,
2308             .exit = cc_aead_exit,
2309             .ivsize = AES_BLOCK_SIZE,
2310             .maxauthsize = SHA1_DIGEST_SIZE,
2311         },
2312         .cipher_mode = DRV_CIPHER_CBC,
2313         .flow_mode = S_DIN_to_AES,
2314         .auth_mode = DRV_HASH_SHA1,
2315         .min_hw_rev = CC_HW_REV_630,
2316         .std_body = CC_STD_NIST,
2317     },
2318     {
2319         .name = "authenc(hmac(sha1),cbc(des3_ede))",
2320         .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2321         .blocksize = DES3_EDE_BLOCK_SIZE,
2322         .template_aead = {
2323             .setkey = cc_des3_aead_setkey,
2324             .setauthsize = cc_aead_setauthsize,
2325             .encrypt = cc_aead_encrypt,
2326             .decrypt = cc_aead_decrypt,
2327             .init = cc_aead_init,
2328             .exit = cc_aead_exit,
2329             .ivsize = DES3_EDE_BLOCK_SIZE,
2330             .maxauthsize = SHA1_DIGEST_SIZE,
2331         },
2332         .cipher_mode = DRV_CIPHER_CBC,
2333         .flow_mode = S_DIN_to_DES,
2334         .auth_mode = DRV_HASH_SHA1,
2335         .min_hw_rev = CC_HW_REV_630,
2336         .std_body = CC_STD_NIST,
2337     },
2338     {
2339         .name = "authenc(hmac(sha256),cbc(aes))",
2340         .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2341         .blocksize = AES_BLOCK_SIZE,
2342         .template_aead = {
2343             .setkey = cc_aead_setkey,
2344             .setauthsize = cc_aead_setauthsize,
2345             .encrypt = cc_aead_encrypt,
2346             .decrypt = cc_aead_decrypt,
2347             .init = cc_aead_init,
2348             .exit = cc_aead_exit,
2349             .ivsize = AES_BLOCK_SIZE,
2350             .maxauthsize = SHA256_DIGEST_SIZE,
2351         },
2352         .cipher_mode = DRV_CIPHER_CBC,
2353         .flow_mode = S_DIN_to_AES,
2354         .auth_mode = DRV_HASH_SHA256,
2355         .min_hw_rev = CC_HW_REV_630,
2356         .std_body = CC_STD_NIST,
2357     },
2358     {
2359         .name = "authenc(hmac(sha256),cbc(des3_ede))",
2360         .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2361         .blocksize = DES3_EDE_BLOCK_SIZE,
2362         .template_aead = {
2363             .setkey = cc_des3_aead_setkey,
2364             .setauthsize = cc_aead_setauthsize,
2365             .encrypt = cc_aead_encrypt,
2366             .decrypt = cc_aead_decrypt,
2367             .init = cc_aead_init,
2368             .exit = cc_aead_exit,
2369             .ivsize = DES3_EDE_BLOCK_SIZE,
2370             .maxauthsize = SHA256_DIGEST_SIZE,
2371         },
2372         .cipher_mode = DRV_CIPHER_CBC,
2373         .flow_mode = S_DIN_to_DES,
2374         .auth_mode = DRV_HASH_SHA256,
2375         .min_hw_rev = CC_HW_REV_630,
2376         .std_body = CC_STD_NIST,
2377     },
2378     {
2379         .name = "authenc(xcbc(aes),cbc(aes))",
2380         .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2381         .blocksize = AES_BLOCK_SIZE,
2382         .template_aead = {
2383             .setkey = cc_aead_setkey,
2384             .setauthsize = cc_aead_setauthsize,
2385             .encrypt = cc_aead_encrypt,
2386             .decrypt = cc_aead_decrypt,
2387             .init = cc_aead_init,
2388             .exit = cc_aead_exit,
2389             .ivsize = AES_BLOCK_SIZE,
2390             .maxauthsize = AES_BLOCK_SIZE,
2391         },
2392         .cipher_mode = DRV_CIPHER_CBC,
2393         .flow_mode = S_DIN_to_AES,
2394         .auth_mode = DRV_HASH_XCBC_MAC,
2395         .min_hw_rev = CC_HW_REV_630,
2396         .std_body = CC_STD_NIST,
2397     },
2398     {
2399         .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2400         .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2401         .blocksize = 1,
2402         .template_aead = {
2403             .setkey = cc_aead_setkey,
2404             .setauthsize = cc_aead_setauthsize,
2405             .encrypt = cc_aead_encrypt,
2406             .decrypt = cc_aead_decrypt,
2407             .init = cc_aead_init,
2408             .exit = cc_aead_exit,
2409             .ivsize = CTR_RFC3686_IV_SIZE,
2410             .maxauthsize = SHA1_DIGEST_SIZE,
2411         },
2412         .cipher_mode = DRV_CIPHER_CTR,
2413         .flow_mode = S_DIN_to_AES,
2414         .auth_mode = DRV_HASH_SHA1,
2415         .min_hw_rev = CC_HW_REV_630,
2416         .std_body = CC_STD_NIST,
2417     },
2418     {
2419         .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2420         .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2421         .blocksize = 1,
2422         .template_aead = {
2423             .setkey = cc_aead_setkey,
2424             .setauthsize = cc_aead_setauthsize,
2425             .encrypt = cc_aead_encrypt,
2426             .decrypt = cc_aead_decrypt,
2427             .init = cc_aead_init,
2428             .exit = cc_aead_exit,
2429             .ivsize = CTR_RFC3686_IV_SIZE,
2430             .maxauthsize = SHA256_DIGEST_SIZE,
2431         },
2432         .cipher_mode = DRV_CIPHER_CTR,
2433         .flow_mode = S_DIN_to_AES,
2434         .auth_mode = DRV_HASH_SHA256,
2435         .min_hw_rev = CC_HW_REV_630,
2436         .std_body = CC_STD_NIST,
2437     },
2438     {
2439         .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2440         .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2441         .blocksize = 1,
2442         .template_aead = {
2443             .setkey = cc_aead_setkey,
2444             .setauthsize = cc_aead_setauthsize,
2445             .encrypt = cc_aead_encrypt,
2446             .decrypt = cc_aead_decrypt,
2447             .init = cc_aead_init,
2448             .exit = cc_aead_exit,
2449             .ivsize = CTR_RFC3686_IV_SIZE,
2450             .maxauthsize = AES_BLOCK_SIZE,
2451         },
2452         .cipher_mode = DRV_CIPHER_CTR,
2453         .flow_mode = S_DIN_to_AES,
2454         .auth_mode = DRV_HASH_XCBC_MAC,
2455         .min_hw_rev = CC_HW_REV_630,
2456         .std_body = CC_STD_NIST,
2457     },
2458     {
2459         .name = "ccm(aes)",
2460         .driver_name = "ccm-aes-ccree",
2461         .blocksize = 1,
2462         .template_aead = {
2463             .setkey = cc_aead_setkey,
2464             .setauthsize = cc_ccm_setauthsize,
2465             .encrypt = cc_aead_encrypt,
2466             .decrypt = cc_aead_decrypt,
2467             .init = cc_aead_init,
2468             .exit = cc_aead_exit,
2469             .ivsize = AES_BLOCK_SIZE,
2470             .maxauthsize = AES_BLOCK_SIZE,
2471         },
2472         .cipher_mode = DRV_CIPHER_CCM,
2473         .flow_mode = S_DIN_to_AES,
2474         .auth_mode = DRV_HASH_NULL,
2475         .min_hw_rev = CC_HW_REV_630,
2476         .std_body = CC_STD_NIST,
2477     },
2478     {
2479         .name = "rfc4309(ccm(aes))",
2480         .driver_name = "rfc4309-ccm-aes-ccree",
2481         .blocksize = 1,
2482         .template_aead = {
2483             .setkey = cc_rfc4309_ccm_setkey,
2484             .setauthsize = cc_rfc4309_ccm_setauthsize,
2485             .encrypt = cc_rfc4309_ccm_encrypt,
2486             .decrypt = cc_rfc4309_ccm_decrypt,
2487             .init = cc_aead_init,
2488             .exit = cc_aead_exit,
2489             .ivsize = CCM_BLOCK_IV_SIZE,
2490             .maxauthsize = AES_BLOCK_SIZE,
2491         },
2492         .cipher_mode = DRV_CIPHER_CCM,
2493         .flow_mode = S_DIN_to_AES,
2494         .auth_mode = DRV_HASH_NULL,
2495         .min_hw_rev = CC_HW_REV_630,
2496         .std_body = CC_STD_NIST,
2497     },
2498     {
2499         .name = "gcm(aes)",
2500         .driver_name = "gcm-aes-ccree",
2501         .blocksize = 1,
2502         .template_aead = {
2503             .setkey = cc_aead_setkey,
2504             .setauthsize = cc_gcm_setauthsize,
2505             .encrypt = cc_aead_encrypt,
2506             .decrypt = cc_aead_decrypt,
2507             .init = cc_aead_init,
2508             .exit = cc_aead_exit,
2509             .ivsize = 12,
2510             .maxauthsize = AES_BLOCK_SIZE,
2511         },
2512         .cipher_mode = DRV_CIPHER_GCTR,
2513         .flow_mode = S_DIN_to_AES,
2514         .auth_mode = DRV_HASH_NULL,
2515         .min_hw_rev = CC_HW_REV_630,
2516         .std_body = CC_STD_NIST,
2517     },
2518     {
2519         .name = "rfc4106(gcm(aes))",
2520         .driver_name = "rfc4106-gcm-aes-ccree",
2521         .blocksize = 1,
2522         .template_aead = {
2523             .setkey = cc_rfc4106_gcm_setkey,
2524             .setauthsize = cc_rfc4106_gcm_setauthsize,
2525             .encrypt = cc_rfc4106_gcm_encrypt,
2526             .decrypt = cc_rfc4106_gcm_decrypt,
2527             .init = cc_aead_init,
2528             .exit = cc_aead_exit,
2529             .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2530             .maxauthsize = AES_BLOCK_SIZE,
2531         },
2532         .cipher_mode = DRV_CIPHER_GCTR,
2533         .flow_mode = S_DIN_to_AES,
2534         .auth_mode = DRV_HASH_NULL,
2535         .min_hw_rev = CC_HW_REV_630,
2536         .std_body = CC_STD_NIST,
2537     },
2538     {
2539         .name = "rfc4543(gcm(aes))",
2540         .driver_name = "rfc4543-gcm-aes-ccree",
2541         .blocksize = 1,
2542         .template_aead = {
2543             .setkey = cc_rfc4543_gcm_setkey,
2544             .setauthsize = cc_rfc4543_gcm_setauthsize,
2545             .encrypt = cc_rfc4543_gcm_encrypt,
2546             .decrypt = cc_rfc4543_gcm_decrypt,
2547             .init = cc_aead_init,
2548             .exit = cc_aead_exit,
2549             .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2550             .maxauthsize = AES_BLOCK_SIZE,
2551         },
2552         .cipher_mode = DRV_CIPHER_GCTR,
2553         .flow_mode = S_DIN_to_AES,
2554         .auth_mode = DRV_HASH_NULL,
2555         .min_hw_rev = CC_HW_REV_630,
2556         .std_body = CC_STD_NIST,
2557     },
2558 };
2559 
2560 static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2561                         struct device *dev)
2562 {
2563     struct cc_crypto_alg *t_alg;
2564     struct aead_alg *alg;
2565 
2566     t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
2567     if (!t_alg)
2568         return ERR_PTR(-ENOMEM);
2569 
2570     alg = &tmpl->template_aead;
2571 
2572     snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2573     snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2574          tmpl->driver_name);
2575     alg->base.cra_module = THIS_MODULE;
2576     alg->base.cra_priority = CC_CRA_PRIO;
2577 
2578     alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2579     alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2580     alg->base.cra_blocksize = tmpl->blocksize;
2581     alg->init = cc_aead_init;
2582     alg->exit = cc_aead_exit;
2583 
2584     t_alg->aead_alg = *alg;
2585 
2586     t_alg->cipher_mode = tmpl->cipher_mode;
2587     t_alg->flow_mode = tmpl->flow_mode;
2588     t_alg->auth_mode = tmpl->auth_mode;
2589 
2590     return t_alg;
2591 }
2592 
2593 int cc_aead_free(struct cc_drvdata *drvdata)
2594 {
2595     struct cc_crypto_alg *t_alg, *n;
2596     struct cc_aead_handle *aead_handle = drvdata->aead_handle;
2597 
2598     /* Remove registered algs */
2599     list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2600         crypto_unregister_aead(&t_alg->aead_alg);
2601         list_del(&t_alg->entry);
2602     }
2603 
2604     return 0;
2605 }
2606 
2607 int cc_aead_alloc(struct cc_drvdata *drvdata)
2608 {
2609     struct cc_aead_handle *aead_handle;
2610     struct cc_crypto_alg *t_alg;
2611     int rc = -ENOMEM;
2612     int alg;
2613     struct device *dev = drvdata_to_dev(drvdata);
2614 
2615     aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
2616     if (!aead_handle) {
2617         rc = -ENOMEM;
2618         goto fail0;
2619     }
2620 
2621     INIT_LIST_HEAD(&aead_handle->aead_list);
2622     drvdata->aead_handle = aead_handle;
2623 
2624     aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2625                              MAX_HMAC_DIGEST_SIZE);
2626 
2627     if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2628         rc = -ENOMEM;
2629         goto fail1;
2630     }
2631 
2632     /* Linux crypto */
2633     for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2634         if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2635             !(drvdata->std_bodies & aead_algs[alg].std_body))
2636             continue;
2637 
2638         t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2639         if (IS_ERR(t_alg)) {
2640             rc = PTR_ERR(t_alg);
2641             dev_err(dev, "%s alg allocation failed\n",
2642                 aead_algs[alg].driver_name);
2643             goto fail1;
2644         }
2645         t_alg->drvdata = drvdata;
2646         rc = crypto_register_aead(&t_alg->aead_alg);
2647         if (rc) {
2648             dev_err(dev, "%s alg registration failed\n",
2649                 t_alg->aead_alg.base.cra_driver_name);
2650             goto fail1;
2651         }
2652 
2653         list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2654         dev_dbg(dev, "Registered %s\n",
2655             t_alg->aead_alg.base.cra_driver_name);
2656     }
2657 
2658     return 0;
2659 
2660 fail1:
2661     cc_aead_free(drvdata);
2662 fail0:
2663     return rc;
2664 }