0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/of_irq.h>
0011 #include <linux/of_address.h>
0012
0013 #include "compat.h"
0014 #include "ctrl.h"
0015 #include "regs.h"
0016 #include "jr.h"
0017 #include "desc.h"
0018 #include "intern.h"
0019
/* Driver-global bookkeeping: one list of all probed job rings. */
struct jr_driver_data {
	/* List of Physical JobR's with the Driver */
	struct list_head jr_list;
	spinlock_t jr_alloc_lock;	/* protects jr_list */
} ____cacheline_aligned;
0025
static struct jr_driver_data driver_data;	/* singleton ring registry */
static DEFINE_MUTEX(algs_lock);		/* serializes algorithm (un)registration */
static unsigned int active_devs;	/* number of live job rings; under algs_lock */
0029
0030 static void register_algs(struct caam_drv_private_jr *jrpriv,
0031 struct device *dev)
0032 {
0033 mutex_lock(&algs_lock);
0034
0035 if (++active_devs != 1)
0036 goto algs_unlock;
0037
0038 caam_algapi_init(dev);
0039 caam_algapi_hash_init(dev);
0040 caam_pkc_init(dev);
0041 jrpriv->hwrng = !caam_rng_init(dev);
0042 caam_prng_register(dev);
0043 caam_qi_algapi_init(dev);
0044
0045 algs_unlock:
0046 mutex_unlock(&algs_lock);
0047 }
0048
0049 static void unregister_algs(void)
0050 {
0051 mutex_lock(&algs_lock);
0052
0053 if (--active_devs != 0)
0054 goto algs_unlock;
0055
0056 caam_qi_algapi_exit();
0057 caam_prng_unregister(NULL);
0058 caam_pkc_exit();
0059 caam_algapi_hash_exit();
0060 caam_algapi_exit();
0061
0062 algs_unlock:
0063 mutex_unlock(&algs_lock);
0064 }
0065
/* devm action: tear down the crypto engine bound to this job ring device */
static void caam_jr_crypto_engine_exit(void *data)
{
	struct device *jrdev = data;
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);

	/* Free the resources of crypto-engine */
	crypto_engine_exit(jrpriv->engine);
}
0074
/*
 * Quiesce and reset a job ring. Two JRCR_RESET writes are issued: the
 * first flushes in-flight jobs (wait for HALT_COMPLETE in jrintstatus),
 * the second performs the actual ring reset (wait for the RESET bit to
 * self-clear). Returns 0 on success, -EIO on flush/reset timeout.
 */
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}
0114
0115
0116
0117
/*
 * Shutdown JobR independent of platform property code: quiesce the
 * hardware ring first, then kill the dequeue tasklet once the HW can
 * no longer raise new work.
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	return ret;
}
0129
/* Platform-device remove: undo probe, refusing if the ring is in use. */
static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	/*
	 * If this ring registered the hwrng instance (see register_algs()),
	 * unregister it first so it stops using the ring.
	 */
	if (jrpriv->hwrng)
		caam_rng_exit(jrdev->parent);

	/*
	 * Return EBUSY if job ring is still allocated to some tfm
	 * (tfm_count is bumped in caam_jr_alloc()).
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Unregister JR-based crypto algorithms (last ring only) */
	unregister_algs();

	/* Remove the node from the physical JobR list kept by the driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");

	return ret;
}
0165
0166
/* Main per-ring interrupt handler: ack the IRQ and defer work to the tasklet */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Read the interrupt status; the line is shared (requested with
	 * IRQF_SHARED in caam_jr_init()), so a zero status means this
	 * interrupt was not for us.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * A ring-level error is considered unrecoverable here - crash.
	 * NOTE(review): BUG() on a HW error is drastic; presumably chosen
	 * because ring state cannot be trusted afterwards - confirm.
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts until the tasklet has drained the ring */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}
0203
0204
/* Deferred service handler, run as interrupt-context tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	u32 outring_used = 0;

	while (outring_used ||
	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {

		head = READ_ONCE(jrp->head);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		/*
		 * Find the software bookkeeping entry matching the
		 * descriptor DMA address just completed in the output
		 * ring; jobs may complete out of submission order.
		 */
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jr_outentry_desc(jrp->outring, hw_idx) ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break;
		}

		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so CPU can change it */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
								  hw_idx)),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed; 0 flags this slot as reusable below */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params before releasing the ring slot */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
								hw_idx));

		/*
		 * Make sure all information from the CAAM about this
		 * completion (descriptor address, status) has been read
		 * before telling the hardware, via the outring_rmvd
		 * write below, that the output ring slot may be reused.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out of
		 * order (their desc_addr_dma was zeroed above).
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
		outring_used--;
	}

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
0285
0286
0287
0288
0289
0290
0291
0292 struct device *caam_jr_alloc(void)
0293 {
0294 struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
0295 struct device *dev = ERR_PTR(-ENODEV);
0296 int min_tfm_cnt = INT_MAX;
0297 int tfm_cnt;
0298
0299 spin_lock(&driver_data.jr_alloc_lock);
0300
0301 if (list_empty(&driver_data.jr_list)) {
0302 spin_unlock(&driver_data.jr_alloc_lock);
0303 return ERR_PTR(-ENODEV);
0304 }
0305
0306 list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
0307 tfm_cnt = atomic_read(&jrpriv->tfm_count);
0308 if (tfm_cnt < min_tfm_cnt) {
0309 min_tfm_cnt = tfm_cnt;
0310 min_jrpriv = jrpriv;
0311 }
0312 if (!min_tfm_cnt)
0313 break;
0314 }
0315
0316 if (min_jrpriv) {
0317 atomic_inc(&min_jrpriv->tfm_count);
0318 dev = min_jrpriv->dev;
0319 }
0320 spin_unlock(&driver_data.jr_alloc_lock);
0321
0322 return dev;
0323 }
0324 EXPORT_SYMBOL(caam_jr_alloc);
0325
0326
0327
0328
0329
0330
/**
 * caam_jr_free() - Release a job ring obtained via caam_jr_alloc().
 * @rdev: the job ring device previously returned by caam_jr_alloc()
 */
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	/* drop the reference taken in caam_jr_alloc() */
	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head.
 * @dev:  struct device of the job ring to be used
 * @desc: job descriptor to execute; must be DMA-able (it is mapped
 *        DMA_TO_DEVICE here and unmapped in caam_jr_dequeue())
 * @cbk:  callback invoked from the dequeue tasklet upon completion,
 *        with (dev, desc, untranslated CAAM status, areq)
 * @areq: optional opaque argument passed back to @cbk
 *
 * Return: -EINPROGRESS if the job was queued, -ENOSPC if the ring is
 * full, -EIO if the descriptor could not be DMA-mapped.
 */
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	/* descriptor length (in words) is encoded in the header word */
	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	/* reject when either the HW input ring or the SW circular
	 * bookkeeping ring has no free slot */
	if (!jrp->inpring_avail ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -ENOSPC;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));

	/*
	 * Guarantee that the descriptor's DMA address has been written
	 * to the input ring, and that the rest of head_entry is filled
	 * in, before jrp->head is updated (the dequeue side reads head
	 * with READ_ONCE()).
	 */
	smp_wmb();

	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Notify CAAM that one job was added to the input ring.
	 * wr_reg32() performs an MMIO write, which on the kernel's I/O
	 * accessors orders after the preceding normal-memory stores, so
	 * the hardware observes a fully-written ring entry.
	 */
	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	/* refresh the cached free-slot count only when it runs out */
	jrp->inpring_avail--;
	if (!jrp->inpring_avail)
		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

	spin_unlock_bh(&jrp->inplock);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
0432
0433
0434
0435
/*
 * Init JobR independent of platform property detection: reset the HW,
 * allocate the input/output rings and the SW bookkeeping array (all
 * device-managed), program ring bases/sizes, set interrupt coalescing,
 * and hook up the IRQ.
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
					   JOBR_DEPTH, &inpbusaddr,
					   GFP_KERNEL);
	if (!jrp->inpring)
		return -ENOMEM;

	jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
					   JOBR_DEPTH, &outbusaddr,
					   GFP_KERNEL);
	if (!jrp->outring)
		return -ENOMEM;

	jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
				    GFP_KERNEL);
	if (!jrp->entinfo)
		return -ENOMEM;

	/*
	 * Mark every slot's desc_addr_dma non-zero: caam_jr_dequeue()
	 * treats 0 as "slot completed", so 0 must never match a fresh slot.
	 */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->inpring_avail = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
				 dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		tasklet_kill(&jrp->irqtask);
	}

	return error;
}
0500
/* devm action: undo the irq_of_parse_and_map() done in probe */
static void caam_jr_irq_dispose_mapping(void *data)
{
	irq_dispose_mapping((unsigned long)data);
}
0505
0506
0507
0508
/*
 * Probe routine for each detected JobR subsystem.
 * Maps registers, sets the DMA mask, brings up a crypto engine, hooks
 * the interrupt, initializes the ring HW, publishes the ring on the
 * driver's list and triggers algorithm registration. Cleanup paths rely
 * on devm (allocations, ioremap, IRQ mapping, crypto-engine action).
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;	/* monotonically counts probed rings */
	struct resource *r;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection order */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;

	/* Get configured addresses from the device tree */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(jrdev, "platform_get_resource() failed\n");
		return -ENOMEM;
	}

	ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
	if (!ctrl) {
		dev_err(jrdev, "devm_ioremap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		return error;
	}

	/* Initialize crypto engine */
	jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
							  false,
							  CRYPTO_ENGINE_MAX_QLEN);
	if (!jrpriv->engine) {
		dev_err(jrdev, "Could not init crypto-engine\n");
		return -ENOMEM;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
					 jrdev);
	if (error)
		return error;

	/* Start crypto engine */
	error = crypto_engine_start(jrpriv->engine);
	if (error) {
		dev_err(jrdev, "Could not start crypto-engine\n");
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
	if (!jrpriv->irq) {
		dev_err(jrdev, "irq_of_parse_and_map failed\n");
		return -EINVAL;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
					 (void *)(unsigned long)jrpriv->irq);
	if (error)
		return error;

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error)
		return error;

	jrpriv->dev = jrdev;
	/* publish the ring so caam_jr_alloc() can hand it out */
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	register_algs(jrpriv, jrdev->parent);

	return 0;
}
0602
/* Device-tree match table: legacy and current SEC v4.0 JR compatibles */
static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);
0613
/* Platform driver glue; one instance bound per job ring DT node */
static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe = caam_jr_probe,
	.remove = caam_jr_remove,
};
0622
static int __init jr_driver_init(void)
{
	/* global ring registry must be ready before any probe can run */
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}
0629
static void __exit jr_driver_exit(void)
{
	/* per-ring teardown happens in caam_jr_remove() for each device */
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");