// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2021, Linaro Limited. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include "aead.h"

#define CCM_NONCE_ADATA_SHIFT		6
#define CCM_NONCE_AUTHSIZE_SHIFT	3
#define MAX_CCM_ADATA_HEADER_LEN	6

static LIST_HEAD(aead_algs);

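/*
 * DMA completion callback. Tears down the DMA mappings and scatterlist
 * tables built for the request, then finalizes authentication: on encrypt
 * the computed tag is copied out past the payload in req->dst; on non-CCM
 * decrypt the tag found in req->src is compared against the one in the
 * result dump and a mismatch is reported as -EBADMSG. For CCM decrypt the
 * engine appears to verify the MAC itself, with a failure surfacing
 * through qce_check_status() below.
 */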
static void qce_aead_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int error;
	u32 status;
	unsigned int totallen;
	unsigned char tag[SHA256_DIGEST_SIZE] = {0};
	int ret = 0;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "aead dma termination error (%d)\n",
			error);
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);

	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	if (IS_CCM(rctx->flags)) {
		if (req->assoclen) {
			sg_free_table(&rctx->src_tbl);
			if (diff_dst)
				sg_free_table(&rctx->dst_tbl);
		} else {
			if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
				sg_free_table(&rctx->dst_tbl);
		}
	} else {
		sg_free_table(&rctx->dst_tbl);
	}

	error = qce_check_status(qce, &status);
	if (error < 0 && (error != -EBADMSG))
		dev_err(qce->dev, "aead operation error (%x)\n", status);

	if (IS_ENCRYPT(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen;
		if (IS_CCM(rctx->flags))
			scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
						 totallen, ctx->authsize, 1);
		else
			scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
						 totallen, ctx->authsize, 1);
	} else if (!IS_CCM(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen - ctx->authsize;
		scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
		ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
		if (ret) {
			pr_err("Bad message error\n");
			error = -EBADMSG;
		}
	}

	qce->async_req_done(qce, error);
}

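/*
 * Helpers that append a result-buffer entry to an sg table so the engine
 * has somewhere to write its output past the payload: the generic variant
 * uses the shared result dump, the CCM variant the per-request
 * ccmresult_buf (sized to a full BAM burst).
 */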
static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
}

static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);

	sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
}

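/*
 * Builds a fresh destination sg table for the transfer: for CCM with AAD
 * it chains the formatted adata buffer, the message portion of req->dst,
 * and the CCM result buffer; otherwise it chains req->dst followed by the
 * result buffer. The extra result entry is why dst_nents is bumped by one
 * (or two for CCM) over what req->dst itself needs.
 */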
static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg, *msg_sg, __sg[2];
	gfp_t gfp;
	unsigned int assoclen = req->assoclen;
	unsigned int totallen;
	int ret;

	totallen = rctx->cryptlen + assoclen;
	rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		return ERR_PTR(-EINVAL);
	}
	if (IS_CCM(rctx->flags))
		rctx->dst_nents += 2;
	else
		rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ERR_PTR(ret);

	if (IS_CCM(rctx->flags) && assoclen) {
		/* Get the dst buffer */
		msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);

		sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
				     rctx->assoclen);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto dst_tbl_free;
		}
		/* dst buffer */
		sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto dst_tbl_free;
		}
		totallen = rctx->cryptlen + rctx->assoclen;
	} else {
		if (totallen) {
			sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
			if (IS_ERR(sg))
				goto dst_tbl_free;
		}
	}
	if (IS_CCM(rctx->flags))
		sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
	else
		sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);

	if (IS_ERR(sg))
		goto dst_tbl_free;

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;
	rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;

	return sg;

dst_tbl_free:
	sg_free_table(&rctx->dst_tbl);
	return sg;
}

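/*
 * CCM source/destination setup when AAD is present. The AAD is copied out
 * of req->src into a flat buffer and prefixed with its length encoding per
 * RFC 3610/NIST SP 800-38C (a 2-byte header for lengths below 0xff00, the
 * 6-byte 0xfffe form otherwise), then zero-padded to the 16-byte block
 * size before the sg tables are assembled around it.
 */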
static int
qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
	struct scatterlist *sg, *msg_sg, __sg[2];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int assoclen = rctx->assoclen;
	unsigned int adata_header_len, cryptlen, totallen;
	gfp_t gfp;
	bool diff_dst;
	int ret;

	if (IS_DECRYPT(rctx->flags))
		cryptlen = rctx->cryptlen + ctx->authsize;
	else
		cryptlen = rctx->cryptlen;
	totallen = cryptlen + req->assoclen;

	/* Get the msg */
	msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);

	rctx->adata = kzalloc(ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN,
			      GFP_ATOMIC);
	if (!rctx->adata)
		return -ENOMEM;

	/*
	 * Format the associated data header (RFC 3610 and NIST SP 800-38C).
	 * Although the specification allows AAD of up to 2^64 - 1 bytes, the
	 * assoclen field in struct aead_request is an unsigned int, which
	 * limits the AAD to 2^32 - 1 bytes. Only two of the possible header
	 * encodings therefore need to be handled here.
	 */
	if (assoclen < 0xff00) {
		adata_header_len = 2;
		*(__be16 *)rctx->adata = cpu_to_be16(assoclen);
	} else {
		adata_header_len = 6;
		*(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
		*(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
	}

	/* Copy the associated data */
	if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
			      rctx->adata + adata_header_len,
			      assoclen) != assoclen)
		return -EINVAL;

	/* Pad associated data to block size */
	rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);

	diff_dst = (req->src != req->dst);

	if (diff_dst)
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
	else
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
	if (ret)
		return ret;

	/* Associated Data */
	sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
	sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
			     rctx->assoclen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	/* src msg */
	sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	if (!diff_dst) {
		/*
		 * For decrypt, when src and dst buffers are the same, the
		 * buffer already has room for the zero padding that is
		 * written out in place of the input MAC, so the result
		 * buffer below is not needed.
		 */
		if (!IS_DECRYPT(rctx->flags)) {
			sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
			if (IS_ERR(sg)) {
				ret = PTR_ERR(sg);
				goto err_free;
			}
		}
	}
	sg_mark_end(sg);
	rctx->src_sg = rctx->src_tbl.sgl;
	totallen = cryptlen + rctx->assoclen;
	rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);

	if (diff_dst) {
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto err_free;
		}
	} else {
		if (IS_ENCRYPT(rctx->flags))
			rctx->dst_nents = rctx->src_nents + 1;
		else
			rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
err_free:
	sg_free_table(&rctx->src_tbl);
	return ret;
}

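/*
 * Non-CCM buffer setup: builds the destination table (which carries the
 * result buffer) and then either maps req->src directly when src and dst
 * are distinct, or reuses the destination table minus its result entry
 * when they are the same buffer.
 */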
static int qce_aead_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg;
	bool diff_dst = (req->src != req->dst);
	unsigned int totallen;

	totallen = rctx->cryptlen + rctx->assoclen;

	sg = qce_aead_prepare_dst_buf(req);
	if (IS_ERR(sg))
		return PTR_ERR(sg);
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, totallen);
		if (rctx->src_nents < 0) {
			dev_err(qce->dev, "Invalid number of src SG.\n");
			return -EINVAL;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_nents = rctx->dst_nents - 1;
		rctx->src_sg = rctx->dst_sg;
	}
	return 0;
}

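/*
 * CCM buffer setup dispatcher: requests with AAD take the formatting path
 * above, encryption without AAD can use the plain path, and decryption
 * without AAD maps req->src as-is since the trailing MAC travels with the
 * ciphertext.
 */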
static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct scatterlist *sg;
	bool diff_dst = (req->src != req->dst);
	unsigned int cryptlen;

	if (rctx->assoclen)
		return qce_aead_ccm_prepare_buf_assoclen(req);

	if (IS_ENCRYPT(rctx->flags))
		return qce_aead_prepare_buf(req);

	cryptlen = rctx->cryptlen + ctx->authsize;
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg))
			return PTR_ERR(sg);
	} else {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
}

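/*
 * Builds the CCM B_0/counter block from the IV: the flags byte gets the
 * Adata bit (bit 6) when AAD is present and the encoded tag length
 * ((authsize - 2) / 2) in bits 3-5, and cryptlen is copied into the
 * trailing length bytes in big-endian order. Note the plain byte copy of
 * cryptlen appears to assume a little-endian host, which holds for the
 * SoCs this driver targets.
 */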
static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
{
	unsigned int msglen_size, ivsize;
	u8 msg_len[4];
	int i;

	if (!rctx || !rctx->iv)
		return -EINVAL;

	msglen_size = rctx->iv[0] + 1;

	/* Verify that msg len size is valid */
	if (msglen_size < 2 || msglen_size > 8)
		return -EINVAL;

	ivsize = rctx->ivsize;

	/*
	 * Clear the msglen bytes of the IV; otherwise the hardware engine
	 * would fold any stale value left there into the nonce.
	 */
	if (!IS_CCM_RFC4309(rctx->flags)) {
		for (i = 0; i < msglen_size; i++)
			rctx->iv[ivsize - i - 1] = 0;
	}

	/*
	 * The crypto framework encodes cryptlen as an unsigned int. So even
	 * though the spec allows up to 8 bytes to encode msg_len, only
	 * 4 bytes are ever needed.
	 */
	if (msglen_size > 4)
		msglen_size = 4;

	memcpy(&msg_len[0], &rctx->cryptlen, 4);

	memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
	if (rctx->assoclen)
		rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
	rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
				CCM_NONCE_AUTHSIZE_SHIFT;
	for (i = 0; i < msglen_size; i++)
		rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];

	return 0;
}

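/*
 * Queue handler: derives the working IV (for RFC 4309 the 16-byte nonce is
 * rebuilt from the 3-byte salt saved at setkey time plus the 8-byte
 * per-request IV) and the effective assoclen, builds the CCM nonce and the
 * sg tables, DMA-maps them, and finally kicks off the BAM transfer and the
 * crypto engine.
 */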
static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int dst_nents, src_nents, ret;

	if (IS_CCM_RFC4309(rctx->flags)) {
		memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
		rctx->ccm_rfc4309_iv[0] = 3;
		memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
		memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
		rctx->iv = rctx->ccm_rfc4309_iv;
		rctx->ivsize = AES_BLOCK_SIZE;
	} else {
		rctx->iv = req->iv;
		rctx->ivsize = crypto_aead_ivsize(tfm);
	}
	if (IS_CCM_RFC4309(rctx->flags))
		rctx->assoclen = req->assoclen - 8;
	else
		rctx->assoclen = req->assoclen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	if (IS_CCM(rctx->flags)) {
		ret = qce_aead_create_ccm_nonce(rctx, ctx);
		if (ret)
			return ret;
	}
	if (IS_CCM(rctx->flags))
		ret = qce_aead_ccm_prepare_buf(req);
	else
		ret = qce_aead_prepare_buf(req);

	if (ret)
		return ret;
	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		/* dma_map_sg() returns 0, not a negative errno, on failure */
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
	} else {
		if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
			src_nents = dst_nents;
		else
			src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
			       qce_aead_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	if (IS_CCM(rctx->flags) && rctx->assoclen) {
		sg_free_table(&rctx->src_tbl);
		if (diff_dst)
			sg_free_table(&rctx->dst_tbl);
	} else {
		sg_free_table(&rctx->dst_tbl);
	}
	return ret;
}

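/*
 * Common entry point for encrypt/decrypt. Zero-length messages (except
 * CCM decrypt) and the key configurations flagged at setkey time are
 * redirected to the software fallback; everything else is validated (CBC
 * block alignment, RFC 4309 AAD sizes) and queued for the engine.
 */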
static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
	unsigned int blocksize = crypto_aead_blocksize(tfm);

	rctx->flags  = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	/* CE does not handle 0 length messages */
	if (!rctx->cryptlen) {
		if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
			ctx->need_fallback = true;
	}

	/* If fallback is needed, schedule and exit */
	if (ctx->need_fallback) {
		/* Reset need_fallback in case the same ctx is used for another transaction */
		ctx->need_fallback = false;

		aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		aead_request_set_callback(&rctx->fallback_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fallback_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fallback_req, req->assoclen);

		return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
				 crypto_aead_decrypt(&rctx->fallback_req);
	}

	/*
	 * CBC algorithms require message lengths to be
	 * multiples of block size.
	 */
	if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
		return -EINVAL;

	/* RFC 4309 requests support only 16- or 20-byte AAD */
	if (IS_CCM_RFC4309(rctx->flags))
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_aead_encrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 1);
}

static int qce_aead_decrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 0);
}

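/*
 * CCM key setup. For rfc4309(ccm(aes)) the last QCE_CCM4309_SALT_SIZE
 * bytes of the key material are the implicit nonce salt and are stripped
 * off before the AES key-size check. AES-192 is accepted but flagged for
 * fallback, as the engine apparently does not support it.
 */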
static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM_RFC4309(flags)) {
		if (keylen < QCE_CCM4309_SALT_SIZE)
			return -EINVAL;
		keylen -= QCE_CCM4309_SALT_SIZE;
		memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
	}

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->enc_keylen = keylen;
	ctx->auth_keylen = keylen;

	memcpy(ctx->enc_key, key, keylen);
	memcpy(ctx->auth_key, key, keylen);

	if (keylen == AES_KEYSIZE_192)
		ctx->need_fallback = true;

	return IS_CCM_RFC4309(flags) ?
		crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
		crypto_aead_setkey(ctx->fallback, key, keylen);
}

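/*
 * authenc() key setup: splits the key blob into cipher and auth keys, runs
 * the DES/3DES key checks, and schedules a fallback for 3DES keys with
 * repeated halves and for AES-192, neither of which the engine handles
 * (per the checks below).
 */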
static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys authenc_keys;
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
	u32 _key[6];
	int err;

	err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
	if (err)
		return err;

	if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
	    authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
		return -EINVAL;

	if (IS_DES(flags)) {
		err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
	} else if (IS_3DES(flags)) {
		err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
		/*
		 * The crypto engine does not support any two keys
		 * being the same for triple des algorithms. The
		 * verify_aead_des3_key() check above does not cover
		 * these cases, so schedule a fallback when they occur.
		 */
		memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
		if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
		    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
		    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
			ctx->need_fallback = true;
	} else if (IS_AES(flags)) {
		/* Only the standard AES key sizes are supported */
		if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_192 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_256)
			return -EINVAL;
		if (authenc_keys.enckeylen == AES_KEYSIZE_192)
			ctx->need_fallback = true;
	}

	ctx->enc_keylen = authenc_keys.enckeylen;
	ctx->auth_keylen = authenc_keys.authkeylen;

	memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);

	memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
	memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);

	return crypto_aead_setkey(ctx->fallback, key, keylen);
}

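/*
 * Tag-length validation: CCM allows any even size from 4 to 16 bytes,
 * narrowed to 8, 12, or 16 for RFC 4309; other modes take whatever the
 * generic fallback accepts.
 */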
static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM(flags)) {
		if (authsize < 4 || authsize > 16 || authsize % 2)
			return -EINVAL;
		if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
			return -EINVAL;
	}
	ctx->authsize = authsize;

	return crypto_aead_setauthsize(ctx->fallback, authsize);
}

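/*
 * Allocates the software fallback transform up front and sizes the request
 * context so a fallback request can live inside qce_aead_reqctx.
 */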
static int qce_aead_init(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->need_fallback = false;
	ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
					  0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_aead_set_reqsize(tfm, sizeof(struct qce_aead_reqctx) +
				crypto_aead_reqsize(ctx->fallback));
	return 0;
}

static void qce_aead_exit(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->fallback);
}

struct qce_aead_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int maxauthsize;
};

static const struct qce_aead_def aead_def[] = {
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des))",
		.drv_name	= "authenc-hmac-sha1-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name		= "authenc(hmac(sha1),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha1-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des))",
		.drv_name	= "authenc-hmac-sha256-cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(des3_ede))",
		.drv_name	= "authenc-hmac-sha256-cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name		= "authenc(hmac(sha256),cbc(aes))",
		.drv_name	= "authenc-hmac-sha256-cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM,
		.name		= "ccm(aes)",
		.drv_name	= "ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
		.name		= "rfc4309(ccm(aes))",
		.drv_name	= "rfc4309-ccm-aes-qce",
		.blocksize	= 1,
		.ivsize		= 8,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
};

static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct aead_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize	= def->blocksize;
	alg->chunksize		= def->chunksize;
	alg->ivsize		= def->ivsize;
	alg->maxauthsize	= def->maxauthsize;
	if (IS_CCM(def->flags))
		alg->setkey	= qce_aead_ccm_setkey;
	else
		alg->setkey	= qce_aead_setkey;
	alg->setauthsize	= qce_aead_setauthsize;
	alg->encrypt		= qce_aead_encrypt;
	alg->decrypt		= qce_aead_decrypt;
	alg->init		= qce_aead_init;
	alg->exit		= qce_aead_exit;

	alg->base.cra_priority	= 300;
	alg->base.cra_flags	= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_ctxsize	= sizeof(struct qce_aead_ctx);
	alg->base.cra_alignmask	= 0;
	alg->base.cra_module	= THIS_MODULE;

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_aead(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &aead_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_aead_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
		crypto_unregister_aead(&tmpl->alg.aead);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_aead_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
		ret = qce_aead_register_one(&aead_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_aead_unregister(qce);
	return ret;
}

const struct qce_algo_ops aead_ops = {
	.type = CRYPTO_ALG_TYPE_AEAD,
	.register_algs = qce_aead_register,
	.unregister_algs = qce_aead_unregister,
	.async_req_handle = qce_aead_async_req_handle,
};