0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/device.h>
0010 #include <linux/of_address.h>
0011 #include <linux/of_irq.h>
0012 #include <linux/sys_soc.h>
0013 #include <linux/fsl/mc.h>
0014
0015 #include "compat.h"
0016 #include "debugfs.h"
0017 #include "regs.h"
0018 #include "intern.h"
0019 #include "jr.h"
0020 #include "desc_constr.h"
0021 #include "ctrl.h"
0022
/*
 * caam_dpaa2 - true when this CAAM instance is a DPAA2 one (CTPR_MS_DPAA2
 * set in the compile parameters register, see caam_probe()); exported so
 * other CAAM modules can adapt their behavior.
 */
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
0029
0030
0031
0032
0033
/*
 * build_instantiation_desc() - construct a job descriptor that instantiates
 * one RNG4 state handle.
 * @desc:   descriptor buffer to fill (caller-allocated)
 * @handle: RNG4 state handle index to instantiate
 * @do_sk:  when non-zero (and @handle is 0), also generate the secure keys
 *          (JDKEK/TDKEK/TDSK) after instantiation
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
			OP_ALG_PR_ON;

	/* INIT RNG in non-test mode, with prediction resistance */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, secure keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* generate secure keys (non-test) */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
0069
0070
/*
 * build_deinstantiation_desc() - construct a job descriptor that
 * uninstantiates the RNG4 state handle given by @handle.
 */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
	init_job_desc(desc, 0);

	/* Uninstantiate the requested state handle */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
/*
 * run_descriptor_deco0() - run a job descriptor directly on DECO 0,
 * bypassing the job rings. Used early at probe time (e.g. for RNG
 * instantiation) before the job-ring infrastructure is available.
 * @ctrldev: controller device
 * @desc:    descriptor to execute
 * @status:  output; DECO operation status (error field only)
 *
 * Return: 0 on completion, -ENODEV if DECO 0 could not be acquired,
 * -EAGAIN if the descriptor did not finish before the timeout expired.
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, deco_state, flags;
	int i;

	/*
	 * With virtualization enabled (or on the listed i.MX8M parts),
	 * DECO 0 must first be requested/assigned via the DECO request
	 * source register before direct access is possible.
	 */
	if (ctrlpriv->virt_en == 1 ||
	    /*
	     * NOTE(review): the i.MX8M family appears to need the same
	     * DECORSR handshake even without virt_en — presumably a SoC
	     * quirk; confirm against the reference manual / errata.
	     */
	    of_machine_is_compatible("fsl,imx8mq") ||
	    of_machine_is_compatible("fsl,imx8mm") ||
	    of_machine_is_compatible("fsl,imx8mn") ||
	    of_machine_is_compatible("fsl,imx8mp")) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		/* Wait until the assignment is reported valid */
		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	/* Request direct access to DECO 0 */
	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		/* Drop the request before bailing out */
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	/* Copy the descriptor into DECO 0's descriptor buffer */
	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit is used
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);

		/* Era >= 10 reports DECO state in a different register */
		if (ctrlpriv->era < 10)
			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
				     DESC_DBG_DECO_STAT_SHIFT;
		else
			deco_state = (rd_reg32(&deco->dbg_exec) &
				      DESC_DER_DECO_STAT_MASK) >>
				     DESC_DER_DECO_STAT_SHIFT;

		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if (deco_state == DECO_STAT_HOST_ERR)
			break;

		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	/* Report only the error bits of the operation status */
	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197 static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
0198 {
0199 u32 *desc, status;
0200 int sh_idx, ret = 0;
0201
0202 desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
0203 if (!desc)
0204 return -ENOMEM;
0205
0206 for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
0207
0208
0209
0210
0211
0212 if ((1 << sh_idx) & state_handle_mask) {
0213
0214
0215
0216
0217 build_deinstantiation_desc(desc, sh_idx);
0218
0219
0220 ret = run_descriptor_deco0(ctrldev, desc, &status);
0221
0222 if (ret ||
0223 (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
0224 dev_err(ctrldev,
0225 "Failed to deinstantiate RNG4 SH%d\n",
0226 sh_idx);
0227 break;
0228 }
0229 dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
0230 }
0231 }
0232
0233 kfree(desc);
0234
0235 return ret;
0236 }
0237
0238 static void devm_deinstantiate_rng(void *data)
0239 {
0240 struct device *ctrldev = data;
0241 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
0242
0243
0244
0245
0246
0247 if (ctrlpriv->rng4_sh_init)
0248 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
0249 }
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
/*
 * instantiate_rng() - instantiate all RNG4 state handles not already
 * initialized (e.g. by firmware), running the descriptors on DECO 0.
 * @ctrldev:           controller device
 * @state_handle_mask: RDSTA_IFx/RDSTA_PRx snapshot; bits set mean the
 *                     corresponding handle is already instantiated
 * @gen_sk:            non-zero => also generate the secure keys via SH0
 *
 * A handle already instantiated *with* prediction resistance is left
 * alone; one instantiated *without* it is torn down and re-created.
 * On success, registers devm_deinstantiate_rng() so the handles this
 * driver created are torn down automatically on device removal.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EAGAIN if a
 * handle failed to come up (caller retries with a larger entropy delay),
 * or another negative errno from DECO execution.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
		const u32 rdsta_mask = rdsta_if | rdsta_pr;

		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if (rdsta_if & state_handle_mask) {
			/* Keep it if prediction resistance is on too */
			if (rdsta_pr & state_handle_mask)
				continue;

			dev_info(ctrldev,
				 "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
				 sh_idx);

			ret = deinstantiate_rng(ctrldev, rdsta_if);
			if (ret)
				break;
		}

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason, the State Handle didn't get
		 * instantiated although the descriptor has finished,
		 * there is no point in continuing.
		 */
		if (ret)
			break;

		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    (rdsta_val & rdsta_mask) != rdsta_mask) {
			ret = -EAGAIN;
			break;
		}

		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before using the descriptor again */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
	}

	kfree(desc);

	if (ret)
		return ret;

	/* Tear our handles down again automatically on device removal */
	return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
}
0341
0342
0343
0344
0345
0346
0347
/*
 * kick_trng() - start up the TRNG (the entropy source feeding RNG4).
 * @pdev:      the CAAM platform device
 * @ent_delay: entropy delay (clocks per sample) to program, in
 *             RTSDCTL; only written if larger than the current value
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/*
	 * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
	 * properly invalidate the entropy in the entropy register and
	 * force re-generation
	 */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	/* Program the new entropy delay */
	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/*
	 * NOTE(review): this read's result is discarded before the label
	 * below overwrites val — it looks like dead code, but removing a
	 * volatile MMIO read could have side effects; confirm against the
	 * reference manual before cleaning it up.
	 */
	val = rd_reg32(&r4tst->rtmctl);
start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; ; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
		      RTMCTL_SAMP_MODE_RAW_ES_SC);
}
0397
0398 static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
0399 {
0400 static const struct {
0401 u16 ip_id;
0402 u8 maj_rev;
0403 u8 era;
0404 } id[] = {
0405 {0x0A10, 1, 1},
0406 {0x0A10, 2, 2},
0407 {0x0A12, 1, 3},
0408 {0x0A14, 1, 3},
0409 {0x0A14, 2, 4},
0410 {0x0A16, 1, 4},
0411 {0x0A10, 3, 4},
0412 {0x0A11, 1, 4},
0413 {0x0A18, 1, 4},
0414 {0x0A11, 2, 5},
0415 {0x0A12, 2, 5},
0416 {0x0A13, 1, 5},
0417 {0x0A1C, 1, 5}
0418 };
0419 u32 ccbvid, id_ms;
0420 u8 maj_rev, era;
0421 u16 ip_id;
0422 int i;
0423
0424 ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
0425 era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
0426 if (era)
0427 return era;
0428
0429 id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
0430 ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
0431 maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
0432
0433 for (i = 0; i < ARRAY_SIZE(id); i++)
0434 if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
0435 return id[i].era;
0436
0437 return -ENOTSUPP;
0438 }
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449 static int caam_get_era(struct caam_ctrl __iomem *ctrl)
0450 {
0451 struct device_node *caam_node;
0452 int ret;
0453 u32 prop;
0454
0455 caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
0456 ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
0457 of_node_put(caam_node);
0458
0459 if (!ret)
0460 return prop;
0461 else
0462 return caam_get_era_from_hw(ctrl);
0463 }
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474 static void handle_imx6_err005766(u32 __iomem *mcr)
0475 {
0476 if (of_machine_is_compatible("fsl,imx6q") ||
0477 of_machine_is_compatible("fsl,imx6dl") ||
0478 of_machine_is_compatible("fsl,imx6qp"))
0479 clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
0480 1 << MCFGR_AXIPIPE_SHIFT);
0481 }
0482
/* Device-tree match table: both spellings of the SEC v4.0 compatible */
static const struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);
0493
/*
 * struct caam_imx_data - per-SoC clock requirements for i.MX parts
 * @clks:     clocks the CAAM block needs on this SoC
 * @num_clks: number of entries in @clks
 */
struct caam_imx_data {
	const struct clk_bulk_data *clks;
	int num_clks;
};

/* Clocks needed on i.MX6 (quad/dual/solo and plus variants) */
static const struct clk_bulk_data caam_imx6_clks[] = {
	{ .id = "ipg" },
	{ .id = "mem" },
	{ .id = "aclk" },
	{ .id = "emi_slow" },
};

static const struct caam_imx_data caam_imx6_data = {
	.clks = caam_imx6_clks,
	.num_clks = ARRAY_SIZE(caam_imx6_clks),
};

/* Clocks needed on i.MX7 (also reused for i.MX8M, see the SoC table) */
static const struct clk_bulk_data caam_imx7_clks[] = {
	{ .id = "ipg" },
	{ .id = "aclk" },
};

static const struct caam_imx_data caam_imx7_data = {
	.clks = caam_imx7_clks,
	.num_clks = ARRAY_SIZE(caam_imx7_clks),
};

/* Clocks needed on i.MX6UL */
static const struct clk_bulk_data caam_imx6ul_clks[] = {
	{ .id = "ipg" },
	{ .id = "mem" },
	{ .id = "aclk" },
};

static const struct caam_imx_data caam_imx6ul_data = {
	.clks = caam_imx6ul_clks,
	.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};

/* Clocks needed on VF610 */
static const struct clk_bulk_data caam_vf610_clks[] = {
	{ .id = "ipg" },
};

static const struct caam_imx_data caam_vf610_data = {
	.clks = caam_vf610_clks,
	.num_clks = ARRAY_SIZE(caam_vf610_clks),
};

/*
 * SoC -> clock-data map. The specific "i.MX6UL" entry must precede the
 * "i.MX6*" wildcard; the bare-family entry at the end matches i.MX SoCs
 * with no clock data (caam_probe() rejects those with -EINVAL).
 */
static const struct soc_device_attribute caam_imx_soc_table[] = {
	{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
	{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
	{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
	{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
	{ .soc_id = "VF*", .data = &caam_vf610_data },
	{ .family = "Freescale i.MX" },
	{ }
};
0550
0551 static void disable_clocks(void *data)
0552 {
0553 struct caam_drv_private *ctrlpriv = data;
0554
0555 clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
0556 }
0557
0558 static int init_clocks(struct device *dev, const struct caam_imx_data *data)
0559 {
0560 struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
0561 int ret;
0562
0563 ctrlpriv->num_clks = data->num_clks;
0564 ctrlpriv->clks = devm_kmemdup(dev, data->clks,
0565 data->num_clks * sizeof(data->clks[0]),
0566 GFP_KERNEL);
0567 if (!ctrlpriv->clks)
0568 return -ENOMEM;
0569
0570 ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
0571 if (ret) {
0572 dev_err(dev,
0573 "Failed to request all necessary clocks\n");
0574 return ret;
0575 }
0576
0577 ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
0578 if (ret) {
0579 dev_err(dev,
0580 "Failed to prepare/enable all necessary clocks\n");
0581 return ret;
0582 }
0583
0584 return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
0585 }
0586
/*
 * caam_remove_debugfs() - devres teardown hook removing the driver's
 * debugfs directory tree.
 * @dfs_root: the debugfs root dentry created at probe time
 */
static void caam_remove_debugfs(void *dfs_root)
{
	debugfs_remove_recursive(dfs_root);
}
0591
0592 #ifdef CONFIG_FSL_MC_BUS
0593 static bool check_version(struct fsl_mc_version *mc_version, u32 major,
0594 u32 minor, u32 revision)
0595 {
0596 if (mc_version->major > major)
0597 return true;
0598
0599 if (mc_version->major == major) {
0600 if (mc_version->minor > minor)
0601 return true;
0602
0603 if (mc_version->minor == minor &&
0604 mc_version->revision > revision)
0605 return true;
0606 }
0607
0608 return false;
0609 }
0610 #endif
0611
0612 static bool needs_entropy_delay_adjustment(void)
0613 {
0614 if (of_machine_is_compatible("fsl,imx6sx"))
0615 return true;
0616 return false;
0617 }
0618
0619
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u64 caam_id;
	const struct soc_device_attribute *imx_soc_match;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
	struct dentry *dfs_root;
	u32 scfgr, comp_params;
	u8 rng_vid;
	int pg_size;
	int BLOCK_OFFSET = 0;
	bool pr_support = false;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	/* On i.MX, the required clocks must be enabled first */
	imx_soc_match = soc_device_match(caam_imx_soc_table);
	caam_imx = (bool)imx_soc_match;

	if (imx_soc_match) {
		/* Bare-family table entries carry no clock data */
		if (!imx_soc_match->data) {
			dev_err(dev, "No clock data provided for i.MX SoC");
			return -EINVAL;
		}

		ret = init_clocks(dev, imx_soc_match->data);
		if (ret)
			return ret;
	}

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = devm_of_iomap(dev, nprop, 0, NULL);
	ret = PTR_ERR_OR_ZERO(ctrl);
	if (ret) {
		dev_err(dev, "caam: of_iomap() failed\n");
		return ret;
	}

	/* Detect endianness and pointer size from hardware */
	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));
	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
	if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
		caam_ptr_sz = sizeof(u64);
	else
		caam_ptr_sz = sizeof(u32);
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);

#ifdef CONFIG_CAAM_QI
	/* If (DPAA 1.x) QI present, check whether dependencies are available */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ret = qman_is_probed();
		if (!ret) {
			return -EPROBE_DEFER;
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman probe error\n");
			return -ENODEV;
		}

		ret = qman_portals_probed();
		if (!ret) {
			return -EPROBE_DEFER;
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman portals probe error\n");
			return -ENODEV;
		}
	}
#endif

	/*
	 * Choose the BLOCK_OFFSET based on the page size supported by
	 * the platform; the register sub-blocks are laid out one page apart
	 */
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
	if (pg_size == 0)
		BLOCK_OFFSET = PG_SIZE_4K;
	else
		BLOCK_OFFSET = PG_SIZE_64K;

	/* Compute the pointers to the internal register sub-blocks */
	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
			   );
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
			 );

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
	ctrlpriv->mc_en = !!np;
	of_node_put(np);

#ifdef CONFIG_FSL_MC_BUS
	/*
	 * When the management complex (MC) handles the hardware, its
	 * firmware version decides whether it instantiated the RNG with
	 * prediction resistance already (MC f/w >= 10.20.0)
	 */
	if (ctrlpriv->mc_en) {
		struct fsl_mc_version *mc_version;

		mc_version = fsl_mc_get_version();
		if (mc_version)
			pr_support = check_version(mc_version, 10, 20, 0);
		else
			return -EPROBE_DEFER;
	}
#endif

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of SoCs with Management Complex, MC f/w performs
	 * the configuration.
	 */
	if (!ctrlpriv->mc_en)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST);

	handle_imx6_err005766(&ctrl->mcr);

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/*
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);

	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
		return ret;
	}

	ctrlpriv->era = caam_get_era(ctrl);
	ctrlpriv->domain = iommu_get_domain_for_dev(dev);

	dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
					       dfs_root);
		if (ret)
			return ret;
	}

	caam_debugfs_init(ctrlpriv, dfs_root);

	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
			       );
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
		if (ret)
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
	}

	/* Count and map the job rings declared in the device tree */
	ring = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl +
					      (ring + JR_BLOCK_NUMBER) *
					      BLOCK_OFFSET
					     );
			ctrlpriv->total_jobrs++;
			ring++;
		}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		return -ENOMEM;
	}

	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ls);
	ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);

	/*
	 * Some SoCs like the LS1028A (non-E) indicate CTPR_LS_BLOB support,
	 * but fail when actually using it due to missing AES support, so
	 * check both here.
	 */
	if (ctrlpriv->era < 10) {
		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
		ctrlpriv->blob_present = ctrlpriv->blob_present &&
			(rd_reg32(&ctrl->perfmon.cha_num_ls) & CHA_ID_LS_AES_MASK);
	} else {
		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
			   CHA_VER_VID_SHIFT;
		ctrlpriv->blob_present = ctrlpriv->blob_present &&
			(rd_reg32(&ctrl->vreg.aesa) & CHA_VER_MISC_AES_NUM_MASK);
	}

	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
		do {
			int inst_handles =
				rd_reg32(&ctrl->r4tst[0].rdsta) &
						RDSTA_MASK;
			/*
			 * If either SH were instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (needs_entropy_delay_adjustment())
				ent_delay = 12000;
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				dev_info(dev,
					 "Entropy delay = %u\n",
					 ent_delay);
				kick_trng(pdev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
			/*
			 * entropy delay is determined via TRNG characterization,
			 * so for SoCs needing the fixed adjustment no retry
			 * with different delays is useful
			 */
			if (needs_entropy_delay_adjustment())
				break;
			if (ret == -EAGAIN)
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
				cpu_relax();
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG");
			return ret;
		}
		/*
		 * Set handles initialized by this module as the complement of
		 * the already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;

		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
	}

	/* NOTE: RTIC detection could go here */

	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 ctrlpriv->era);
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

	/* Create the child (job ring) platform devices */
	ret = devm_of_platform_populate(dev);
	if (ret)
		dev_err(dev, "JR platform devices creation error\n");

	return ret;
}
0943
/* Top-level (controller) platform driver; job rings are child devices */
static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.of_match_table = caam_match,
	},
	.probe = caam_probe,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");