/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Version 3 (v3) device support
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

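/* Allocate @count contiguous entries from the device's key storage
 * block (KSB) bitmap, sleeping until enough entries become available.
 * Returns the index of the first entry offset by KSB_START, or 0 if
 * the wait was interrupted.
 */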
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}

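/* Return @count KSB entries starting at @start to the bitmap and wake
 * any allocators waiting for free entries.
 */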
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	if (!start)
		return;

	mutex_lock(&ccp->sb_mutex);

	bitmap_clear(ccp->sb, start - KSB_START, count);

	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

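/* The queue status register encodes how many command slots are free */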
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

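/* Submit a command to the device: the CMD_REQ1-CMD_REQx words are
 * written first, then writing CMD_REQ0 kicks off processing.  If an
 * interrupt on completion was requested, wait for it and clean up the
 * queue on error or stop-on-complete.
 */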
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start processing this command */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;
			if (cmd_q->cmd_error)
				ccp_log_error(cmd_q->ccp,
					      cmd_q->cmd_error);

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just the head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

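/* Build and submit the command words for an AES operation */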
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

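/* Build and submit the command words for an XTS-AES (128-bit) operation */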
static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

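/* Build and submit the command words for a SHA operation; the total
 * message bit count is supplied only on the final (EOM) pass.
 */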
static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

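/* Build and submit the command words for an RSA operation */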
static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

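/* Build and submit the command words for a passthrough operation
 * (a copy with optional byte swap and bit mask); source and
 * destination may each be system memory or the storage block.
 */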
static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.sb * CCP_SB_BYTES;
		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

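/* Build and submit the command words for an ECC operation */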
static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

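/* Helpers to mask or unmask all per-queue interrupt sources via the
 * IRQ mask register.
 */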
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

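/* Interrupt bottom half: scan the queues, latch status for each
 * signalled queue, record any command error, acknowledge the interrupt
 * and wake the waiter, then re-enable queue interrupts.
 */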
static void ccp_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}
	ccp_enable_queue_interrupts(ccp);
}

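/* Interrupt top half: mask queue interrupts, then either schedule the
 * tasklet or run the bottom half directly.
 */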
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	ccp_disable_queue_interrupts(ccp);
	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp_irq_bh((unsigned long)ccp);

	return IRQ_HANDLED;
}

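/* Bring the device up: discover the available queues, allocate a DMA
 * pool and reserve two KSB entries (key and context) per queue, hook
 * up the IRQ, start one kthread per queue, and register the RNG and
 * DMA engine interfaces.
 */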
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, i;
	int ret;

	/* Find available queues */
	ccp->qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; (i < MAX_HW_QUEUES) &&
	     (ccp->cmd_q_count < ccp->max_q_count); i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the ISR tasklet if used */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
			     (unsigned long)ccp);

	dev_dbg(dev, "Starting threads...\n");

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
				      "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
	}

	dev_dbg(dev, "Enabling interrupts...\n");

	ccp_enable_queue_interrupts(ccp);

	dev_dbg(dev, "Registering device...\n");
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

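/* Tear down in the reverse order of ccp_init(), then fail any commands
 * still on the cmd or backlog lists with -ENODEV.
 */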
static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int i;

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	ccp_unregister_rng(ccp);

	/* Remove this device from the list of available units */
	ccp_del_device(ccp);

	/* Disable and clear interrupts */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queues */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

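/* Version 3 dispatch table and version data.  The v3 hardware has no
 * 3DES engine, so .des3 is left NULL.  The two vdata instances differ
 * only in where the device registers sit within the mapped region
 * (offset 0 vs. 0x20000).
 */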
static const struct ccp_actions ccp3_actions = {
	.aes = ccp_perform_aes,
	.xts_aes = ccp_perform_xts_aes,
	.des3 = NULL,
	.sha = ccp_perform_sha,
	.rsa = ccp_perform_rsa,
	.passthru = ccp_perform_passthru,
	.ecc = ccp_perform_ecc,
	.sballoc = ccp_alloc_ksb,
	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
};

const struct ccp_vdata ccpv3_platform = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0,
	.rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0x20000,
	.rsamax = CCP_RSA_MAX_WIDTH,
};