// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file handles the core of the driver: descriptor rings, DMA start,
 * the interrupt handler, algorithm registration, PM and debugfs.
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>

#include "sl3516-ce.h"

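/*
 * sl3516_ce_desc_init() - allocate and initialize the DMA descriptor rings
 *
 * Allocate two coherent rings of MAXDESC descriptors (TX and RX), mark every
 * descriptor as CPU-owned and chain each ring circularly, then allocate the
 * packet control block used to pass cipher parameters to the engine.
 */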
static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;
	int i;

	ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
	if (!ce->tx)
		return -ENOMEM;
	ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
	if (!ce->rx)
		goto err_rx;

	for (i = 0; i < MAXDESC; i++) {
		ce->tx[i].frame_ctrl.bits.own = CE_CPU;
		ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
	}
	ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;

	for (i = 0; i < MAXDESC; i++) {
		ce->rx[i].frame_ctrl.bits.own = CE_CPU;
		ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
	}
	ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;

	ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
				       &ce->dctrl, GFP_KERNEL);
	if (!ce->pctrl)
		goto err_pctrl;

	return 0;
err_pctrl:
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_rx:
	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	return -ENOMEM;
}

static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;

	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
	dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
			  ce->dctrl);
}

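/*
 * start_dma_tx()/start_dma_rx() - kick the TX and RX DMA channels in chained
 * mode, with interrupts enabled for failures and, on RX, for end of frame.
 */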
static void start_dma_tx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE |
	    TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;

	writel(v, ce->base + IPSEC_TXDMA_CTRL);
}

static void start_dma_rx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE |
	    RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH |
	    RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR |
	    RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;

	writel(v, ce->base + IPSEC_RXDMA_CTRL);
}

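/*
 * get_desc_tx()/get_desc_rx() - return the next descriptor of the ring,
 * advancing the ring index (ce->ctx / ce->crx) and wrapping at MAXDESC.
 */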
static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
	struct descriptor *dd;

	dd = &ce->tx[ce->ctx];
	ce->ctx++;
	if (ce->ctx >= MAXDESC)
		ce->ctx = 0;
	return dd;
}

static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
	struct descriptor *rdd;

	rdd = &ce->rx[ce->crx];
	ce->crx++;
	if (ce->crx >= MAXDESC)
		ce->crx = 0;
	return rdd;
}

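/*
 * sl3516_ce_run_task() - run one cipher request on the engine
 * @ce:		the engine device
 * @rctx:	the request context holding the mapped source/destination SGs
 * @name:	the algorithm name, used in error messages
 *
 * Queue one RX descriptor per destination SG entry, then, for each source SG
 * entry, queue two TX descriptors: one for the packet control block (at
 * ce->dctrl) and one for the data itself. Completion is signalled by the RX
 * end-of-frame interrupt.
 */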
int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
		       const char *name)
{
	struct descriptor *dd, *rdd = NULL;
	u32 v;
	int i, err = 0;

	ce->stat_req++;

	reinit_completion(&ce->complete);
	ce->status = 0;

	for (i = 0; i < rctx->nr_sgd; i++) {
		dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgd, rctx->t_dst[i].len);
		rdd = get_desc_rx(ce);
		rdd->buf_adr = rctx->t_dst[i].addr;
		rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
		rdd->frame_ctrl.bits.own = CE_DMA;
	}
	/* The last RX descriptor raises the end-of-frame interrupt */
	rdd->next_desc.bits.eofie = 1;

	for (i = 0; i < rctx->nr_sgs; i++) {
		dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgs, rctx->t_src[i].len);
		rctx->h->algorithm_len = rctx->t_src[i].len;

		/* First TX descriptor: the packet control block */
		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
		dd->buf_adr = ce->dctrl;
		dd->flag_status.tx_flag.tqflag = rctx->tqflag;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;

		/* Second TX descriptor: the data to process */
		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
		dd->buf_adr = rctx->t_src[i].addr;
		dd->flag_status.tx_flag.tqflag = 0;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;
		start_dma_tx(ce);
		start_dma_rx(ce);
	}
	wait_for_completion_interruptible_timeout(&ce->complete,
						  msecs_to_jiffies(5000));
	if (ce->status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	v = readl(ce->base + IPSEC_STATUS_REG);
	if (v & 0xFFF) {
		dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
		err = -EFAULT;
	}

	return err;
}

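/*
 * ce_irq_handler() - acknowledge the pending DMA status bits, report bus and
 * descriptor protocol errors, and complete the running request on RX
 * end-of-frame.
 */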
static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
	u32 v;

	ce->stat_irq++;

	/* Read and acknowledge all pending status bits */
	v = readl(ce->base + IPSEC_DMA_STATUS);
	writel(v, ce->base + IPSEC_DMA_STATUS);

	if (v & DMA_STATUS_TS_DERR)
		dev_err(ce->dev, "AHB bus error while TX\n");
	if (v & DMA_STATUS_TS_PERR)
		dev_err(ce->dev, "TX descriptor protocol error\n");
	if (v & DMA_STATUS_RS_DERR)
		dev_err(ce->dev, "AHB bus error while RX\n");
	if (v & DMA_STATUS_RS_PERR)
		dev_err(ce->dev, "RX descriptor protocol error\n");

	if (v & DMA_STATUS_TS_EOFI)
		ce->stat_irq_tx++;
	if (v & DMA_STATUS_RS_EOFI) {
		ce->status = 1;
		complete(&ce->complete);
		ce->stat_irq_rx++;
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}

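/* Algorithms handled by the engine; currently only ecb(aes) is wired up. */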
static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.mode = ECB_AES,
	.alg.skcipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sl3516",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sl3516_ce_cipher_init,
			.cra_exit = sl3516_ce_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sl3516_ce_aes_setkey,
		.encrypt	= sl3516_ce_skencrypt,
		.decrypt	= sl3516_ce_skdecrypt,
	}
},
};

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sl3516_ce_dev *ce = seq->private;
	unsigned int i;

	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
	seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
	seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
	seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
	seq_printf(seq, "nreq %lu\n", ce->stat_req);
	seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
	seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
	seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
	seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
	seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
#endif

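/*
 * sl3516_ce_register_algs() - register all supported algorithms with the
 * crypto API, binding each entry of ce_algs to this device.
 */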
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
	int err;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Registering %s\n",
				 ce_algs[i].alg.skcipher.base.cra_name);
			err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "Failed to register %s\n",
					ce_algs[i].alg.skcipher.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}


static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.cra_name);
			crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}

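/*
 * sl3516_ce_start() - reset the ring indexes, point the engine at the first
 * TX/RX descriptors and clear any stale DMA status.
 */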
static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
	ce->ctx = 0;
	ce->crx = 0;
	writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
	writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
	writel(0, ce->base + IPSEC_DMA_STATUS);
}

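/*
 * Power management strategy: the device starts suspended
 * (sl3516_ce_pm_init()) and is only woken via runtime PM while the engine is
 * actually in use.
 */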
static int sl3516_ce_pm_suspend(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);

	reset_control_assert(ce->reset);
	clk_disable_unprepare(ce->clks);
	return 0;
}

static int sl3516_ce_pm_resume(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ce->clks);
	if (err) {
		dev_err(ce->dev, "Cannot prepare/enable clock\n");
		goto error;
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}

	sl3516_ce_start(ce);

	return 0;
error:
	sl3516_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sl3516_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};

static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return err;
}

static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}

static int sl3516_ce_probe(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
		return err;
	}

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");
	ce->clks = devm_clk_get(ce->dev, NULL);
	if (IS_ERR(ce->clks)) {
		err = PTR_ERR(ce->clks);
		dev_err(ce->dev, "Cannot get clock err=%d\n", err);
		return err;
	}

	err = sl3516_ce_desc_init(ce);
	if (err)
		return err;

	err = sl3516_ce_pm_init(ce);
	if (err)
		goto error_pm;

	init_completion(&ce->complete);

	ce->engine = crypto_engine_alloc_init(ce->dev, true);
	if (!ce->engine) {
		dev_err(ce->dev, "Cannot allocate engine\n");
		err = -ENOMEM;
		goto error_engine;
	}

	err = crypto_engine_start(ce->engine);
	if (err) {
		dev_err(ce->dev, "Cannot start engine\n");
		goto error_engine;
	}

	err = sl3516_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = sl3516_ce_rng_register(ce);
	if (err)
		goto error_rng;

	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_pmuse;

	v = readl(ce->base + IPSEC_ID);
	dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
		 v & GENMASK(31, 4),
		 v & GENMASK(3, 0));
	v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
	dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
		 v & GENMASK(15, 4),
		 v & GENMASK(3, 0));

	pm_runtime_put_sync(ce->dev);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	/* Ignore any error from debugfs: the statistics are optional */
	ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
	ce->dbgfs_stats = debugfs_create_file("stats", 0444,
					      ce->dbgfs_dir, ce,
					      &sl3516_ce_debugfs_fops);
#endif

	return 0;
error_pmuse:
	sl3516_ce_rng_unregister(ce);
error_rng:
	sl3516_ce_unregister_algs(ce);
error_alg:
	crypto_engine_exit(ce->engine);
error_engine:
	sl3516_ce_pm_exit(ce);
error_pm:
	sl3516_ce_free_descs(ce);
	return err;
}

static int sl3516_ce_remove(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);

	sl3516_ce_rng_unregister(ce);
	sl3516_ce_unregister_algs(ce);
	crypto_engine_exit(ce->engine);
	sl3516_ce_pm_exit(ce);
	sl3516_ce_free_descs(ce);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	return 0;
}

static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
	{ .compatible = "cortina,sl3516-crypto"},
	{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);

static struct platform_driver sl3516_ce_driver = {
	.probe		= sl3516_ce_probe,
	.remove		= sl3516_ce_remove,
	.driver		= {
		.name		= "sl3516-crypto",
		.pm		= &sl3516_ce_pm_ops,
		.of_match_table	= sl3516_ce_crypto_of_match_table,
	},
};

module_platform_driver(sl3516_ce_driver);

MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");