// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128-, 192- and 256-bit
 * keys in ECB mode.
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_dev *ce = op->ce;
    struct scatterlist *in_sg;
    struct scatterlist *out_sg;
    struct scatterlist *sg;

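    /*
     * The engine works on whole 16-byte AES blocks, so empty or
     * partial-block requests must go to the software fallback.
     */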
    if (areq->cryptlen == 0 || areq->cryptlen % 16) {
        ce->fallback_mod16++;
        return true;
    }

    /*
     * Check that we have enough descriptors for TX.
     * Note: TX needs one control descriptor for each SG.
     */
    if (sg_nents(areq->src) > MAXDESC / 2) {
        ce->fallback_sg_count_tx++;
        return true;
    }
    /* check that we have enough descriptors for RX */
    if (sg_nents(areq->dst) > MAXDESC) {
        ce->fallback_sg_count_rx++;
        return true;
    }

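    /*
     * Each source and destination SG entry must be a multiple of 16
     * bytes long (both its SG length and its DMA-mapped length) and
     * must start at a 16-byte aligned offset.
     */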
    sg = areq->src;
    while (sg) {
        if ((sg->length % 16) != 0) {
            ce->fallback_mod16++;
            return true;
        }
        if ((sg_dma_len(sg) % 16) != 0) {
            ce->fallback_mod16++;
            return true;
        }
        if (!IS_ALIGNED(sg->offset, 16)) {
            ce->fallback_align16++;
            return true;
        }
        sg = sg_next(sg);
    }
    sg = areq->dst;
    while (sg) {
        if ((sg->length % 16) != 0) {
            ce->fallback_mod16++;
            return true;
        }
        if ((sg_dma_len(sg) % 16) != 0) {
            ce->fallback_mod16++;
            return true;
        }
        if (!IS_ALIGNED(sg->offset, 16)) {
            ce->fallback_align16++;
            return true;
        }
        sg = sg_next(sg);
    }

    /* source and destination must have the same number of SGs, with matching lengths */
    in_sg = areq->src;
    out_sg = areq->dst;
    while (in_sg && out_sg) {
        if (in_sg->length != out_sg->length) {
            ce->fallback_not_same_len++;
            return true;
        }
        in_sg = sg_next(in_sg);
        out_sg = sg_next(out_sg);
    }
    if (in_sg || out_sg)
        return true;

    return false;
}

static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
    struct sl3516_ce_alg_template *algt;
    int err;

    algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
    algt->stat_fb++;

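    /* forward the request as-is to the software fallback tfm */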
    skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
    skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                  areq->base.complete, areq->base.data);
    skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                               areq->cryptlen, areq->iv);
    if (rctx->op_dir == CE_DECRYPTION)
        err = crypto_skcipher_decrypt(&rctx->fallback_req);
    else
        err = crypto_skcipher_encrypt(&rctx->fallback_req);
    return err;
}

static int sl3516_ce_cipher(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_dev *ce = op->ce;
    struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
    struct sl3516_ce_alg_template *algt;
    struct scatterlist *sg;
    unsigned int todo, len;
    struct pkt_control_ecb *ecb;
    int nr_sgs = 0;
    int nr_sgd = 0;
    int err = 0;
    int i;

    algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);

    dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
            crypto_tfm_alg_name(areq->base.tfm),
            areq->cryptlen,
            rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
            op->keylen);

    algt->stat_req++;

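    /*
     * Map the scatterlists for DMA: an in-place request uses a single
     * bidirectional mapping, otherwise src and dst are mapped
     * separately for TX and RX.
     */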
    if (areq->src == areq->dst) {
        nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
                            DMA_BIDIRECTIONAL);
        if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
            dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
            err = -EINVAL;
            goto theend;
        }
        nr_sgd = nr_sgs;
    } else {
        nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
                            DMA_TO_DEVICE);
        if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
            dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
            err = -EINVAL;
            goto theend;
        }
        nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
                            DMA_FROM_DEVICE);
        if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
            dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
            err = -EINVAL;
            goto theend_sgs;
        }
    }

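    /* fill the source descriptor table from the mapped SG entries */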
    len = areq->cryptlen;
    i = 0;
    sg = areq->src;
    while (i < nr_sgs && sg && len) {
        if (sg_dma_len(sg) == 0)
            goto sgs_next;
        rctx->t_src[i].addr = sg_dma_address(sg);
        todo = min(len, sg_dma_len(sg));
        rctx->t_src[i].len = todo;
        dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
                areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
        len -= todo;
        i++;
sgs_next:
        sg = sg_next(sg);
    }
    if (len > 0) {
        dev_err(ce->dev, "remaining len %u/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
        err = -EINVAL;
        goto theend_sgs;
    }

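    /* same for the destination descriptor table */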
    len = areq->cryptlen;
    i = 0;
    sg = areq->dst;
    while (i < nr_sgd && sg && len) {
        if (sg_dma_len(sg) == 0)
            goto sgd_next;
        rctx->t_dst[i].addr = sg_dma_address(sg);
        todo = min(len, sg_dma_len(sg));
        rctx->t_dst[i].len = todo;
        dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
                areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
        len -= todo;
        i++;
sgd_next:
        sg = sg_next(sg);
    }
    if (len > 0) {
        dev_err(ce->dev, "remaining len %u\n", len);
        err = -EINVAL;
        goto theend_sgs;
    }

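    /*
     * Build the hardware control packet: operation direction, algorithm,
     * data length and key. The tqflag bits appear to select which words
     * of the packet (control, cipher header, key) are sent to the engine.
     */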
    switch (algt->mode) {
    case ECB_AES:
        rctx->pctrllen = sizeof(struct pkt_control_ecb);
        ecb = (struct pkt_control_ecb *)ce->pctrl;

        rctx->tqflag = TQ0_TYPE_CTRL;
        rctx->tqflag |= TQ1_CIPHER;
        ecb->control.op_mode = rctx->op_dir;
        ecb->control.cipher_algorithm = ECB_AES;
        ecb->cipher.header_len = 0;
        ecb->cipher.algorithm_len = areq->cryptlen;
        cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
        rctx->h = &ecb->cipher;

        rctx->tqflag |= TQ4_KEY0;
        rctx->tqflag |= TQ5_KEY4;
        rctx->tqflag |= TQ6_KEY6;
        ecb->control.aesnk = op->keylen / 4;
        break;
    }

    rctx->nr_sgs = nr_sgs;
    rctx->nr_sgd = nr_sgd;
    err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
    if (areq->src == areq->dst) {
        dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
                     DMA_BIDIRECTIONAL);
    } else {
        dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
                     DMA_TO_DEVICE);
        dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
                     DMA_FROM_DEVICE);
    }

theend:
    return err;
}

static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
    int err;
    struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

    err = sl3516_ce_cipher(breq);
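    /* completion callbacks may assume softirq context, hence the BH disable */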
    local_bh_disable();
    crypto_finalize_skcipher_request(engine, breq, err);
    local_bh_enable();

    return 0;
}

int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
    struct crypto_engine *engine;

    memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
    rctx->op_dir = CE_DECRYPTION;

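    /* requests the engine cannot handle run synchronously via the fallback */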
    if (sl3516_ce_need_fallback(areq))
        return sl3516_ce_cipher_fallback(areq);

    engine = op->ce->engine;

    return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
    struct crypto_engine *engine;

    memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
    rctx->op_dir = CE_ENCRYPTION;

    if (sl3516_ce_need_fallback(areq))
        return sl3516_ce_cipher_fallback(areq);

    engine = op->ce->engine;

    return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
    struct sl3516_ce_alg_template *algt;
    const char *name = crypto_tfm_alg_name(tfm);
    struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
    struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
    int err;

    memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

    algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
    op->ce = algt->ce;

    op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(op->fallback_tfm)) {
        dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                name, PTR_ERR(op->fallback_tfm));
        return PTR_ERR(op->fallback_tfm);
    }

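    /* reserve room for our request context plus the fallback's own */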
    sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
                     crypto_skcipher_reqsize(op->fallback_tfm);

    dev_info(op->ce->dev, "Fallback for %s is %s\n",
             crypto_tfm_alg_driver_name(&sktfm->base),
             crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

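    /* hook this tfm into the crypto engine; no separate prepare step is needed */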
    op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
    op->enginectx.op.prepare_request = NULL;
    op->enginectx.op.unprepare_request = NULL;

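    /* keep the device powered for the lifetime of the tfm */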
    err = pm_runtime_get_sync(op->ce->dev);
    if (err < 0)
        goto error_pm;

    return 0;
error_pm:
    pm_runtime_put_noidle(op->ce->dev);
    crypto_free_skcipher(op->fallback_tfm);
    return err;
}

void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

    kfree_sensitive(op->key);
    crypto_free_skcipher(op->fallback_tfm);
    pm_runtime_put_sync_suspend(op->ce->dev);
}

int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
    struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sl3516_ce_dev *ce = op->ce;

    switch (keylen) {
    case 128 / 8:
    case 192 / 8:
    case 256 / 8:
        break;
    default:
        dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
        return -EINVAL;
    }
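    /* store a private copy of the key for building control packets */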
    kfree_sensitive(op->key);
    op->keylen = keylen;
    op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
    if (!op->key)
        return -ENOMEM;

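    /* mirror the request flags into the fallback tfm and give it the same key */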
    crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
    crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

    return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}