// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <linux/atomic.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

#define MAX_CCPS 32

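/* Limit CCP use to a specified number of queues per device */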
static unsigned int nqueues;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");

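/* Limit CCP use to a specified number of devices */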
static atomic_t dev_count = ATOMIC_INIT(0);
static unsigned int max_devs = MAX_CCPS;
module_param(max_devs, uint, 0444);
MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

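/* Human-readable error strings, indexed by the CCP hardware error code */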
#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	"ILLEGAL_FUNCTION_TYPE",
	"ILLEGAL_FUNCTION_MODE",
	"ILLEGAL_FUNCTION_ENCRYPT",
	"ILLEGAL_FUNCTION_SIZE",
	"Zlib_MISSING_INIT_EOM",
	"ILLEGAL_FUNCTION_RSVD",
	"ILLEGAL_BUFFER_LENGTH",
	"VLSB_FAULT",
	"ILLEGAL_MEM_ADDR",
	"ILLEGAL_MEM_SEL",
	"ILLEGAL_CONTEXT_ID",
	"ILLEGAL_KEY_ADDR",
	"0xF Reserved",
	"Zlib_ILLEGAL_MULTI_QUEUE",
	"Zlib_ILLEGAL_JOBID_CHANGE",
	"CMD_TIMEOUT",
	"IDMA0_AXI_SLVERR",
	"IDMA0_AXI_DECERR",
	"0x15 Reserved",
	"IDMA1_AXI_SLAVE_FAULT",
	"IDMA1_AXI_DECERR",
	"0x18 Reserved",
	"ZLIBVHB_AXI_SLVERR",
	"ZLIBVHB_AXI_DECERR",
	"0x1B Reserved",
	"ZLIB_UNEXPECTED_EOM",
	"ZLIB_EXTRA_DATA",
	"ZLIB_BTYPE",
	"ZLIB_UNDEFINED_SYMBOL",
	"ZLIB_UNDEFINED_DISTANCE_S",
	"ZLIB_CODE_LENGTH_SYMBOL",
	"ZLIB_VHB_ILLEGAL_FETCH",
	"ZLIB_UNCOMPRESSED_LEN",
	"ZLIB_LIMIT_REACHED",
	"ZLIB_CHECKSUM_MISMATCH0",
	"ODMA0_AXI_SLVERR",
	"ODMA0_AXI_DECERR",
	"0x28 Reserved",
	"ODMA1_AXI_SLVERR",
	"ODMA1_AXI_DECERR",
};

void ccp_log_error(struct ccp_device *d, unsigned int e)
{
	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
		return;

	if (e < ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
	else
		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

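/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */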
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

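/* Round-robin counter */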
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

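/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available for use.
 */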
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
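		/* We already hold the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */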
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

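/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */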
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
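		/* Advance the RR pointer past the unit being removed,
		 * wrapping around to the head of the list if needed.
		 */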
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
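	/* Register an RNG */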
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

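	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */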
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

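/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */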
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

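/**
 * ccp_version - get the version of the CCP
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP unit is present.
 */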
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

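/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */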
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

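	/* Some commands might need to be sent to a specific device */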
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

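	/* Caller must supply a callback routine */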
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

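		/* Find an idle queue */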
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

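	/* If we found an idle queue, wake it up */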
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

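	/* Find an idle queue */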
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

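	/* If we found an idle queue, wake it up */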
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

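/* Per-queue kernel thread: sleeps until woken, pulls the next command
 * off the device's list via ccp_dequeue_cmd(), runs it, and invokes
 * the completion callback from tasklet context.
 */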
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

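		/* Execute the command */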
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

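		/* Schedule the completion callback */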
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

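/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */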
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

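	/* Initialize the wait queues */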
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

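	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */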
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
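		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */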
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

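	/* Reset the counter and save the rng value */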
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

void ccp_dev_suspend(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

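	/* If there's no device there's nothing to do */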
	if (!ccp)
		return;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

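	/* Wake all the queue kthreads to prepare for suspend */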
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

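	/* Wait for all queue kthreads to say they're done */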
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));
}

void ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

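	/* If there's no device there's nothing to do */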
	if (!ccp)
		return;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

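	/* Wake up all the kthreads */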
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
}

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

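	/*
	 * Check how many we have so far, and stop after reaching
	 * that number
	 */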
	if (atomic_inc_return(&dev_count) > max_devs)
		return 0; /* don't fail the load */

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	if (!nqueues || (nqueues > MAX_HW_QUEUES))
		ccp->max_q_count = MAX_HW_QUEUES;
	else
		ccp->max_q_count = nqueues;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret) {
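		/* A positive number means that the device cannot be
		 * initialized, but no additional message is required.
		 */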
		if (ret > 0)
			goto e_quiet;

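		/* An unexpected problem occurred, and should be reported in the log */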
		goto e_err;
	}

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	dev_notice(dev, "ccp initialization failed\n");

e_quiet:
	sp->ccp_data = NULL;

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}