// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

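/*
 * 64 bytes covers the largest key the hardware consumes: two AES-256
 * keys for XTS. Requests longer than SEC_REQ_LIMIT are split into
 * multiple elements before being queued.
 */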
#define SEC_MAX_CIPHER_KEY      64
#define SEC_REQ_LIMIT SZ_32M

struct sec_c_alg_cfg {
    unsigned c_alg      : 3;
    unsigned c_mode     : 3;
    unsigned key_len    : 2;
    unsigned c_width    : 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
    [SEC_C_DES_ECB_64] = {
        .c_alg = SEC_C_ALG_DES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_DES,
    },
    [SEC_C_DES_CBC_64] = {
        .c_alg = SEC_C_ALG_DES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_DES,
    },
    [SEC_C_3DES_ECB_192_3KEY] = {
        .c_alg = SEC_C_ALG_3DES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_3DES_3_KEY,
    },
    [SEC_C_3DES_ECB_192_2KEY] = {
        .c_alg = SEC_C_ALG_3DES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_3DES_2_KEY,
    },
    [SEC_C_3DES_CBC_192_3KEY] = {
        .c_alg = SEC_C_ALG_3DES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_3DES_3_KEY,
    },
    [SEC_C_3DES_CBC_192_2KEY] = {
        .c_alg = SEC_C_ALG_3DES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_3DES_2_KEY,
    },
    [SEC_C_AES_ECB_128] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_AES_128,
    },
    [SEC_C_AES_ECB_192] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_AES_192,
    },
    [SEC_C_AES_ECB_256] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_ECB,
        .key_len = SEC_KEY_LEN_AES_256,
    },
    [SEC_C_AES_CBC_128] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_AES_128,
    },
    [SEC_C_AES_CBC_192] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_AES_192,
    },
    [SEC_C_AES_CBC_256] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CBC,
        .key_len = SEC_KEY_LEN_AES_256,
    },
    [SEC_C_AES_CTR_128] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CTR,
        .key_len = SEC_KEY_LEN_AES_128,
    },
    [SEC_C_AES_CTR_192] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CTR,
        .key_len = SEC_KEY_LEN_AES_192,
    },
    [SEC_C_AES_CTR_256] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_CTR,
        .key_len = SEC_KEY_LEN_AES_256,
    },
    [SEC_C_AES_XTS_128] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_XTS,
        .key_len = SEC_KEY_LEN_AES_128,
    },
    [SEC_C_AES_XTS_256] = {
        .c_alg = SEC_C_ALG_AES,
        .c_mode = SEC_C_MODE_XTS,
        .key_len = SEC_KEY_LEN_AES_256,
    },
    [SEC_C_NULL] = {
    },
};

/*
 * Mutex used to ensure safe operation of the reference count of
 * algorithm providers.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

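/*
 * Fill a hardware buffer descriptor (BD) template with the cipher
 * algorithm, mode and key length for this transform, plus the DMA
 * address of the key. The template is later copied into each
 * element's BD.
 */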
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
                                           struct sec_bd_info *req,
                                           enum sec_cipher_alg alg)
{
    const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

    memset(req, 0, sizeof(*req));
    req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
    req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
    req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
    req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

    req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
    req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
                                          const u8 *key,
                                          unsigned int keylen,
                                          enum sec_cipher_alg alg)
{
    struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
    struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

    ctx->cipher_alg = alg;
    memcpy(ctx->key, key, keylen);
    sec_alg_skcipher_init_template(ctx, &ctx->req_template,
                                   ctx->cipher_alg);
}

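/*
 * Walk a chained hardware scatter-gather list and return each chunk
 * to the DMA pool it was allocated from.
 */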
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
                            dma_addr_t psec_sgl, struct sec_dev_info *info)
{
    struct sec_hw_sgl *sgl_current, *sgl_next;
    dma_addr_t sgl_next_dma;

    sgl_current = hw_sgl;
    while (sgl_current) {
        sgl_next = sgl_current->next;
        sgl_next_dma = sgl_current->next_sgl;

        dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

        sgl_current = sgl_next;
        psec_sgl = sgl_next_dma;
    }
}

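/*
 * Build a hardware scatter-gather list from an already DMA-mapped
 * scatterlist. A new pool chunk is started every SEC_MAX_SGE_NUM
 * entries and chained to the previous one via next_sgl/next.
 */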
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
                                     dma_addr_t *psec_sgl,
                                     struct scatterlist *sgl,
                                     int count,
                                     struct sec_dev_info *info,
                                     gfp_t gfp)
{
    struct sec_hw_sgl *sgl_current = NULL;
    struct sec_hw_sgl *sgl_next;
    dma_addr_t sgl_next_dma;
    struct scatterlist *sg;
    int ret, sge_index, i;

    if (!count)
        return -EINVAL;

    for_each_sg(sgl, sg, count, i) {
        sge_index = i % SEC_MAX_SGE_NUM;
        if (sge_index == 0) {
            sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
                                       gfp, &sgl_next_dma);
            if (!sgl_next) {
                ret = -ENOMEM;
                goto err_free_hw_sgls;
            }

            if (!sgl_current) { /* First one */
                *psec_sgl = sgl_next_dma;
                *sec_sgl = sgl_next;
            } else { /* Chained */
                sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
                sgl_current->next_sgl = sgl_next_dma;
                sgl_current->next = sgl_next;
            }
            sgl_current = sgl_next;
        }
        sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
        sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
        sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
    }
    sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
    sgl_current->next_sgl = 0;
    (*sec_sgl)->entry_sum_in_chain = count;

    return 0;

err_free_hw_sgls:
    sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
    *psec_sgl = 0;

    return ret;
}

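/*
 * Common setkey path: the key lives in a DMA-coherent buffer so the
 * engine can fetch it directly. On rekeying the existing buffer is
 * cleared and reused rather than reallocated.
 */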
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen,
                                   enum sec_cipher_alg alg)
{
    struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct device *dev = ctx->queue->dev_info->dev;

    mutex_lock(&ctx->lock);
    if (ctx->key) {
        /* rekeying */
        memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
    } else {
        /* new key */
        ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
                                      &ctx->pkey, GFP_KERNEL);
        if (!ctx->key) {
            mutex_unlock(&ctx->lock);
            return -ENOMEM;
        }
    }
    mutex_unlock(&ctx->lock);
    sec_alg_skcipher_init_context(tfm, key, keylen, alg);

    return 0;
}

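/*
 * The per-mode setkey helpers below map the supplied key length to
 * the matching hardware algorithm variant, verifying DES, 3DES and
 * XTS keys where the crypto API requires it.
 */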
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    enum sec_cipher_alg alg;

    switch (keylen) {
    case AES_KEYSIZE_128:
        alg = SEC_C_AES_ECB_128;
        break;
    case AES_KEYSIZE_192:
        alg = SEC_C_AES_ECB_192;
        break;
    case AES_KEYSIZE_256:
        alg = SEC_C_AES_ECB_256;
        break;
    default:
        return -EINVAL;
    }

    return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    enum sec_cipher_alg alg;

    switch (keylen) {
    case AES_KEYSIZE_128:
        alg = SEC_C_AES_CBC_128;
        break;
    case AES_KEYSIZE_192:
        alg = SEC_C_AES_CBC_192;
        break;
    case AES_KEYSIZE_256:
        alg = SEC_C_AES_CBC_256;
        break;
    default:
        return -EINVAL;
    }

    return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    enum sec_cipher_alg alg;

    switch (keylen) {
    case AES_KEYSIZE_128:
        alg = SEC_C_AES_CTR_128;
        break;
    case AES_KEYSIZE_192:
        alg = SEC_C_AES_CTR_192;
        break;
    case AES_KEYSIZE_256:
        alg = SEC_C_AES_CTR_256;
        break;
    default:
        return -EINVAL;
    }

    return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    enum sec_cipher_alg alg;
    int ret;

    ret = xts_verify_key(tfm, key, keylen);
    if (ret)
        return ret;

    switch (keylen) {
    case AES_KEYSIZE_128 * 2:
        alg = SEC_C_AES_XTS_128;
        break;
    case AES_KEYSIZE_256 * 2:
        alg = SEC_C_AES_XTS_256;
        break;
    default:
        return -EINVAL;
    }

    return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    return verify_skcipher_des_key(tfm, key) ?:
           sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
    return verify_skcipher_des_key(tfm, key) ?:
           sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
    return verify_skcipher_des3_key(tfm, key) ?:
           sec_alg_skcipher_setkey(tfm, key, keylen,
                                   SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
    return verify_skcipher_des3_key(tfm, key) ?:
           sec_alg_skcipher_setkey(tfm, key, keylen,
                                   SEC_C_3DES_CBC_192_3KEY);
}

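/*
 * Free a single request element: both hardware SGLs and the split
 * software scatterlists that backed them.
 */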
static void sec_alg_free_el(struct sec_request_el *el,
                            struct sec_dev_info *info)
{
    sec_free_hw_sgl(el->out, el->dma_out, info);
    sec_free_hw_sgl(el->in, el->dma_in, info);
    kfree(el->sgl_in);
    kfree(el->sgl_out);
    kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
    struct sec_request_el *el, *temp;
    int ret = 0;

    mutex_lock(&sec_req->lock);
    list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
        /*
         * Add to the hardware queue only under the following
         * circumstances:
         * 1) Software and hardware queues are empty so there are no
         *    chain dependencies.
         * 2) No dependencies as this is a new IV (check the software
         *    queue is empty to maintain ordering).
         * 3) No dependencies because the mode does no chaining.
         *
         * In all other cases, first insert into the software queue,
         * which is then emptied as requests complete.
         */
        if (!queue->havesoftqueue ||
            (kfifo_is_empty(&queue->softqueue) &&
             sec_queue_empty(queue))) {
            ret = sec_queue_send(queue, &el->req, sec_req);
            if (ret == -EAGAIN) {
                /* Wait until we can send, then try again */
                /* Dead if we get here - should not happen */
                ret = -EBUSY;
                goto err_unlock;
            }
        } else {
            kfifo_put(&queue->softqueue, el);
        }
    }
err_unlock:
    mutex_unlock(&sec_req->lock);

    return ret;
}

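/*
 * Per-element completion handler. Updates the IV for chained modes
 * (CBC copies the trailing block of the element, CTR increments the
 * counter), feeds the next queued element or backlogged request to
 * the hardware, frees this element, and completes the skcipher
 * request once its final element is done.
 */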
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
                                      struct crypto_async_request *req_base)
{
    struct skcipher_request *skreq = container_of(req_base,
                                                  struct skcipher_request,
                                                  base);
    struct sec_request *sec_req = skcipher_request_ctx(skreq);
    struct sec_request *backlog_req;
    struct sec_request_el *sec_req_el, *nextrequest;
    struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
    struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
    struct device *dev = ctx->queue->dev_info->dev;
    int icv_or_skey_en, ret;
    bool done;

    sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
                                  head);
    icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
        SEC_BD_W0_ICV_OR_SKEY_EN_S;
    if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
        dev_err(dev, "Got an invalid answer %lu %d\n",
                sec_resp->w1 & SEC_BD_W1_BD_INVALID,
                icv_or_skey_en);
        sec_req->err = -EINVAL;
        /*
         * We need to muddle on to avoid getting stuck with elements
         * on the queue. The error will be reported to the requester
         * so it should be able to handle it appropriately.
         */
    }

    spin_lock_bh(&ctx->queue->queuelock);
    /* Put the IV in place for chained cases */
    switch (ctx->cipher_alg) {
    case SEC_C_AES_CBC_128:
    case SEC_C_AES_CBC_192:
    case SEC_C_AES_CBC_256:
        if (sec_req_el->req.w0 & SEC_BD_W0_DE)
            sg_pcopy_to_buffer(sec_req_el->sgl_out,
                               sg_nents(sec_req_el->sgl_out),
                               skreq->iv,
                               crypto_skcipher_ivsize(atfm),
                               sec_req_el->el_length -
                               crypto_skcipher_ivsize(atfm));
        else
            sg_pcopy_to_buffer(sec_req_el->sgl_in,
                               sg_nents(sec_req_el->sgl_in),
                               skreq->iv,
                               crypto_skcipher_ivsize(atfm),
                               sec_req_el->el_length -
                               crypto_skcipher_ivsize(atfm));
        /* No need to sync to the device as coherent DMA */
        break;
    case SEC_C_AES_CTR_128:
    case SEC_C_AES_CTR_192:
    case SEC_C_AES_CTR_256:
        crypto_inc(skreq->iv, 16);
        break;
    default:
        /* Do not update */
        break;
    }

    if (ctx->queue->havesoftqueue &&
        !kfifo_is_empty(&ctx->queue->softqueue) &&
        sec_queue_empty(ctx->queue)) {
        ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
        if (ret <= 0)
            dev_err(dev,
                    "Error getting next element from kfifo %d\n",
                    ret);
        else
            /* We know there is space so this cannot fail */
            sec_queue_send(ctx->queue, &nextrequest->req,
                           nextrequest->sec_req);
    } else if (!list_empty(&ctx->backlog)) {
        /* Need to verify there is room first */
        backlog_req = list_first_entry(&ctx->backlog,
                                       typeof(*backlog_req),
                                       backlog_head);
        if (sec_queue_can_enqueue(ctx->queue,
            backlog_req->num_elements) ||
            (ctx->queue->havesoftqueue &&
             kfifo_avail(&ctx->queue->softqueue) >
             backlog_req->num_elements)) {
            sec_send_request(backlog_req, ctx->queue);
            backlog_req->req_base->complete(backlog_req->req_base,
                                            -EINPROGRESS);
            list_del(&backlog_req->backlog_head);
        }
    }
    spin_unlock_bh(&ctx->queue->queuelock);

    mutex_lock(&sec_req->lock);
    list_del(&sec_req_el->head);
    mutex_unlock(&sec_req->lock);
    sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

    /*
     * Request is done.
     * The dance with the lock is needed because it may be freed in
     * the completion handler.
     */
    mutex_lock(&sec_req->lock);
    done = list_empty(&sec_req->elements);
    mutex_unlock(&sec_req->lock);
    if (done) {
        if (crypto_skcipher_ivsize(atfm)) {
            dma_unmap_single(dev, sec_req->dma_iv,
                             crypto_skcipher_ivsize(atfm),
                             DMA_TO_DEVICE);
        }
        dma_unmap_sg(dev, skreq->src, sec_req->len_in,
                     DMA_BIDIRECTIONAL);
        if (skreq->src != skreq->dst)
            dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
                         DMA_BIDIRECTIONAL);
        skreq->base.complete(&skreq->base, sec_req->err);
    }
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
    struct sec_request *sec_req = shadow;

    sec_req->cb(resp, sec_req->req_base);
}

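/*
 * Work out how to split a request into SEC_REQ_LIMIT sized steps:
 * every step is SEC_REQ_LIMIT bytes except the final one, which
 * takes the remainder. For example, a 70 MiB request splits into
 * 32M + 32M + 6M.
 */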
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
                                              int *steps, gfp_t gfp)
{
    size_t *sizes;
    int i;

    /* Split into suitable sized blocks */
    *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
    sizes = kcalloc(*steps, sizeof(*sizes), gfp);
    if (!sizes)
        return -ENOMEM;

    for (i = 0; i < *steps - 1; i++)
        sizes[i] = SEC_REQ_LIMIT;
    sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
    *split_sizes = sizes;

    return 0;
}

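/*
 * DMA map a scatterlist, then split it into steps sub-lists whose
 * lengths match split_sizes. The caller owns the returned arrays of
 * scatterlist pointers and nent counts.
 */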
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
                                int steps, struct scatterlist ***splits,
                                int **splits_nents,
                                int sgl_len_in,
                                struct device *dev, gfp_t gfp)
{
    int ret, count;

    count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
    if (!count)
        return -EINVAL;

    *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
    if (!*splits) {
        ret = -ENOMEM;
        goto err_unmap_sg;
    }
    *splits_nents = kcalloc(steps, sizeof(int), gfp);
    if (!*splits_nents) {
        ret = -ENOMEM;
        goto err_free_splits;
    }

    /* Split the mapped scatterlist into steps pieces of split_sizes bytes */
    ret = sg_split(sgl, count, 0, steps, split_sizes,
                   *splits, *splits_nents, gfp);
    if (ret) {
        ret = -ENOMEM;
        goto err_free_splits_nents;
    }

    return 0;

err_free_splits_nents:
    kfree(*splits_nents);
err_free_splits:
    kfree(*splits);
err_unmap_sg:
    dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

    return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
                                struct scatterlist **splits, int *splits_nents,
                                int sgl_len_in, struct device *dev)
{
    int i;

    for (i = 0; i < steps; i++)
        kfree(splits[i]);
    kfree(splits_nents);
    kfree(splits);

    dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

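/*
 * Allocate one request element and fill its BD from the transform's
 * template: cipher direction, granule size (spread across w0 and w2)
 * and the hardware SGL addresses. When source and destination differ
 * the DE bit is set and a separate output SGL is built; otherwise the
 * operation is done in place.
 */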
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
                           int el_size, bool different_dest,
                           struct scatterlist *sgl_in, int n_ents_in,
                           struct scatterlist *sgl_out, int n_ents_out,
                           struct sec_dev_info *info, gfp_t gfp)
{
    struct sec_request_el *el;
    struct sec_bd_info *req;
    int ret;

    el = kzalloc(sizeof(*el), gfp);
    if (!el)
        return ERR_PTR(-ENOMEM);
    el->el_length = el_size;
    req = &el->req;
    memcpy(req, template, sizeof(*req));

    req->w0 &= ~SEC_BD_W0_CIPHER_M;
    if (encrypt)
        req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
    else
        req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

    req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
    req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
        SEC_BD_W0_C_GRAN_SIZE_19_16_M;

    req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
    req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
        SEC_BD_W0_C_GRAN_SIZE_21_20_M;

    /* Writing whole u32 so no need to take care of masking */
    req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
        ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
         SEC_BD_W2_C_GRAN_SIZE_15_0_M);

    req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
    req->w1 |= SEC_BD_W1_ADDR_TYPE;

    el->sgl_in = sgl_in;

    ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
                                    n_ents_in, info, gfp);
    if (ret)
        goto err_free_el;

    req->data_addr_lo = lower_32_bits(el->dma_in);
    req->data_addr_hi = upper_32_bits(el->dma_in);

    if (different_dest) {
        el->sgl_out = sgl_out;
        ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
                                        el->sgl_out,
                                        n_ents_out, info, gfp);
        if (ret)
            goto err_free_hw_sgl_in;

        req->w0 |= SEC_BD_W0_DE;
        req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
        req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

    } else {
        req->w0 &= ~SEC_BD_W0_DE;
        req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
        req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
    }

    return el;

err_free_hw_sgl_in:
    sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
    kfree(el);

    return ERR_PTR(ret);
}

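/*
 * Core request path: split the request into elements of at most
 * SEC_REQ_LIMIT bytes, map the IV and scatterlists, build all the
 * elements up front, then queue the whole set atomically. Requests
 * that cannot fit are backlogged if the caller allows it; otherwise
 * -EBUSY is returned. On success the request completes asynchronously
 * and -EINPROGRESS is returned.
 */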
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
                                   bool encrypt)
{
    struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
    struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
    struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
    struct sec_queue *queue = ctx->queue;
    struct sec_request *sec_req = skcipher_request_ctx(skreq);
    struct sec_dev_info *info = queue->dev_info;
    int i, ret, steps;
    size_t *split_sizes;
    struct scatterlist **splits_in;
    struct scatterlist **splits_out = NULL;
    int *splits_in_nents;
    int *splits_out_nents = NULL;
    struct sec_request_el *el, *temp;
    bool split = skreq->src != skreq->dst;
    gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

    mutex_init(&sec_req->lock);
    sec_req->req_base = &skreq->base;
    sec_req->err = 0;
    /* SGL mapping out here to allow us to break it up as necessary */
    sec_req->len_in = sg_nents(skreq->src);

    ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
                                             &steps, gfp);
    if (ret)
        return ret;
    sec_req->num_elements = steps;
    ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
                               &splits_in_nents, sec_req->len_in,
                               info->dev, gfp);
    if (ret)
        goto err_free_split_sizes;

    if (split) {
        sec_req->len_out = sg_nents(skreq->dst);
        ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
                                   &splits_out, &splits_out_nents,
                                   sec_req->len_out, info->dev, gfp);
        if (ret)
            goto err_unmap_in_sg;
    }
    /* Shared info stored in sec_req - applies to all BDs */
    sec_req->tfm_ctx = ctx;
    sec_req->cb = sec_skcipher_alg_callback;
    INIT_LIST_HEAD(&sec_req->elements);

    /*
     * Future optimization: in the chaining case we can't use a dma
     * pool bounce buffer, but in the case where we know there is no
     * chaining we can.
     */
    if (crypto_skcipher_ivsize(atfm)) {
        sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
                                         crypto_skcipher_ivsize(atfm),
                                         DMA_TO_DEVICE);
        if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
            ret = -ENOMEM;
            goto err_unmap_out_sg;
        }
    }

    /* Set them all up then queue - cleaner error handling. */
    for (i = 0; i < steps; i++) {
        el = sec_alg_alloc_and_fill_el(&ctx->req_template,
                                       encrypt ? 1 : 0,
                                       split_sizes[i],
                                       skreq->src != skreq->dst,
                                       splits_in[i], splits_in_nents[i],
                                       split ? splits_out[i] : NULL,
                                       split ? splits_out_nents[i] : 0,
                                       info, gfp);
        if (IS_ERR(el)) {
            ret = PTR_ERR(el);
            goto err_free_elements;
        }
        el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
        el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
        el->sec_req = sec_req;
        list_add_tail(&el->head, &sec_req->elements);
    }

    /*
     * Only attempt to queue if the whole lot can fit in the queue -
     * we can't successfully clean up after a partial queuing so this
     * must succeed or fail atomically.
     *
     * Big hammer test of both software and hardware queues - could be
     * more refined but this is unlikely to happen so no need.
     */

    /* Grab a big lock for a long time to avoid concurrency issues */
    spin_lock_bh(&queue->queuelock);

    /*
     * Can go on to queue if we have space in either:
     * 1) The hardware queue and no software queue, or
     * 2) The software queue,
     * AND there is nothing in the backlog. If there is anything in
     * the backlog we have to queue to the backlog list only and
     * return busy.
     */
    if ((!sec_queue_can_enqueue(queue, steps) &&
         (!queue->havesoftqueue ||
          kfifo_avail(&queue->softqueue) > steps)) ||
        !list_empty(&ctx->backlog)) {
        ret = -EBUSY;
        if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
            list_add_tail(&sec_req->backlog_head, &ctx->backlog);
            spin_unlock_bh(&queue->queuelock);
            goto out;
        }

        spin_unlock_bh(&queue->queuelock);
        goto err_free_elements;
    }
    ret = sec_send_request(sec_req, queue);
    spin_unlock_bh(&queue->queuelock);
    if (ret)
        goto err_free_elements;

    ret = -EINPROGRESS;
out:
    /* Cleanup - all elements in pointer arrays have been copied */
    kfree(splits_in_nents);
    kfree(splits_in);
    kfree(splits_out_nents);
    kfree(splits_out);
    kfree(split_sizes);
    return ret;

err_free_elements:
    list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
        list_del(&el->head);
        sec_alg_free_el(el, info);
    }
    if (crypto_skcipher_ivsize(atfm))
        dma_unmap_single(info->dev, sec_req->dma_iv,
                         crypto_skcipher_ivsize(atfm),
                         DMA_BIDIRECTIONAL);
err_unmap_out_sg:
    if (split)
        sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
                            splits_out_nents, sec_req->len_out,
                            info->dev);
err_unmap_in_sg:
    sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
                        sec_req->len_in, info->dev);
err_free_split_sizes:
    kfree(split_sizes);

    return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
    return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
    return sec_alg_skcipher_crypto(req, false);
}

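/*
 * Allocate a hardware queue for this transform. This basic init is
 * used by modes with no chaining dependency (ECB, XTS); chained
 * modes use the _with_queue variant below, which adds a software
 * queue to preserve ordering.
 */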
static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
    struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

    mutex_init(&ctx->lock);
    INIT_LIST_HEAD(&ctx->backlog);
    crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

    ctx->queue = sec_queue_alloc_start_safe();
    if (IS_ERR(ctx->queue))
        return PTR_ERR(ctx->queue);

    spin_lock_init(&ctx->queue->queuelock);
    ctx->queue->havesoftqueue = false;

    return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
    struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct device *dev = ctx->queue->dev_info->dev;

    if (ctx->key) {
        memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
        dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
                          ctx->pkey);
    }
    sec_queue_stop_release(ctx->queue);
}

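/*
 * As above, but also allocate a 512-element software queue used to
 * hold elements whose IV depends on a previous element still in
 * flight, so that submission order is maintained.
 */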
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
    struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
    int ret;

    ret = sec_alg_skcipher_init(tfm);
    if (ret)
        return ret;

    INIT_KFIFO(ctx->queue->softqueue);
    ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
    if (ret) {
        sec_alg_skcipher_exit(tfm);
        return ret;
    }
    ctx->queue->havesoftqueue = true;

    return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
    struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

    kfifo_free(&ctx->queue->softqueue);
    sec_alg_skcipher_exit(tfm);
}

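/*
 * Algorithms exposed through the crypto API. Chained modes (CBC,
 * CTR) use the *_with_queue init/exit pairs; ECB and XTS have no
 * inter-element dependencies and use the plain versions.
 */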
static struct skcipher_alg sec_algs[] = {
    {
        .base = {
            .cra_name = "ecb(aes)",
            .cra_driver_name = "hisi_sec_aes_ecb",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init,
        .exit = sec_alg_skcipher_exit,
        .setkey = sec_alg_skcipher_setkey_aes_ecb,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = 0,
    }, {
        .base = {
            .cra_name = "cbc(aes)",
            .cra_driver_name = "hisi_sec_aes_cbc",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init_with_queue,
        .exit = sec_alg_skcipher_exit_with_queue,
        .setkey = sec_alg_skcipher_setkey_aes_cbc,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
    }, {
        .base = {
            .cra_name = "ctr(aes)",
            .cra_driver_name = "hisi_sec_aes_ctr",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init_with_queue,
        .exit = sec_alg_skcipher_exit_with_queue,
        .setkey = sec_alg_skcipher_setkey_aes_ctr,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
    }, {
        .base = {
            .cra_name = "xts(aes)",
            .cra_driver_name = "hisi_sec_aes_xts",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init,
        .exit = sec_alg_skcipher_exit,
        .setkey = sec_alg_skcipher_setkey_aes_xts,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
    }, {
        /* Unable to find any test vectors so untested */
        .base = {
            .cra_name = "ecb(des)",
            .cra_driver_name = "hisi_sec_des_ecb",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = DES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init,
        .exit = sec_alg_skcipher_exit,
        .setkey = sec_alg_skcipher_setkey_des_ecb,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = 0,
    }, {
        .base = {
            .cra_name = "cbc(des)",
            .cra_driver_name = "hisi_sec_des_cbc",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = DES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init_with_queue,
        .exit = sec_alg_skcipher_exit_with_queue,
        .setkey = sec_alg_skcipher_setkey_des_cbc,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
    }, {
        .base = {
            .cra_name = "cbc(des3_ede)",
            .cra_driver_name = "hisi_sec_3des_cbc",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init_with_queue,
        .exit = sec_alg_skcipher_exit_with_queue,
        .setkey = sec_alg_skcipher_setkey_3des_cbc,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
    }, {
        .base = {
            .cra_name = "ecb(des3_ede)",
            .cra_driver_name = "hisi_sec_3des_ecb",
            .cra_priority = 4001,
            .cra_flags = CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_ALLOCATES_MEMORY,
            .cra_blocksize = DES3_EDE_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
            .cra_alignmask = 0,
            .cra_module = THIS_MODULE,
        },
        .init = sec_alg_skcipher_init,
        .exit = sec_alg_skcipher_exit,
        .setkey = sec_alg_skcipher_setkey_3des_ecb,
        .decrypt = sec_alg_skcipher_decrypt,
        .encrypt = sec_alg_skcipher_encrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = 0,
    }
};

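/*
 * Registration is reference counted across devices: only the first
 * device to come up registers the algorithms, and only the last one
 * to go away unregisters them.
 */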
int sec_algs_register(void)
{
    int ret = 0;

    mutex_lock(&algs_lock);
    if (++active_devs != 1)
        goto unlock;

    ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
    if (ret)
        --active_devs;
unlock:
    mutex_unlock(&algs_lock);

    return ret;
}

void sec_algs_unregister(void)
{
    mutex_lock(&algs_lock);
    if (--active_devs != 0)
        goto unlock;
    crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
    mutex_unlock(&algs_lock);
}