0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/dma-mapping.h>
0014 #include <linux/dmapool.h>
0015 #include <linux/slab.h>
0016 #include <asm/vio.h>
0017 #include <asm/irq.h>
0018 #include <linux/types.h>
0019 #include <linux/list.h>
0020 #include <linux/spinlock.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/wait.h>
0023 #include <asm/prom.h>
0024
0025 #include "tpm.h"
0026 #include "tpm_ibmvtpm.h"
0027
/* Name under which this driver registers with the VIO bus and the IRQ layer. */
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

/*
 * VIO device IDs this driver binds to: the TPM 1.2 ("IBM,vtpm") and
 * TPM 2.0 ("IBM,vtpm20") virtual TPM device-tree nodes.  The list is
 * terminated by the empty entry, as required by the VIO bus code.
 */
static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "IBM,vtpm", "IBM,vtpm20"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
/**
 * ibmvtpm_send_crq_word() - Send a one-word CRQ request to the hypervisor
 * @vdev:	vio device struct
 * @w1:	pre-packed first word of the CRQ entry (the second word is 0)
 *
 * Return: H_SUCCESS on success, otherwise the hcall error code.
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087 static int ibmvtpm_send_crq(struct vio_dev *vdev,
0088 u8 valid, u8 msg, u16 len, u32 data)
0089 {
0090 u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
0091 (u64)data;
0092 return ibmvtpm_send_crq_word(vdev, w1);
0093 }
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105 static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
0106 {
0107 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
0108 u16 len;
0109
0110 if (!ibmvtpm->rtce_buf) {
0111 dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
0112 return 0;
0113 }
0114
0115 len = ibmvtpm->res_len;
0116
0117 if (count < len) {
0118 dev_err(ibmvtpm->dev,
0119 "Invalid size in recv: count=%zd, crq_size=%d\n",
0120 count, len);
0121 return -EIO;
0122 }
0123
0124 spin_lock(&ibmvtpm->rtce_lock);
0125 memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
0126 memset(ibmvtpm->rtce_buf, 0, len);
0127 ibmvtpm->res_len = 0;
0128 spin_unlock(&ibmvtpm->rtce_lock);
0129 return len;
0130 }
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140 static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
0141 {
0142 int rc;
0143
0144 rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
0145 if (rc != H_SUCCESS)
0146 dev_err(ibmvtpm->dev,
0147 "%s failed rc=%d\n", __func__, rc);
0148
0149 return rc;
0150 }
0151
0152
0153
0154
0155
0156
0157
0158
/**
 * tpm_ibmvtpm_resume() - Resume the vTPM after suspend/migration
 * @dev:	device struct
 *
 * Re-enables the CRQ with the hypervisor, re-enables VIO interrupts
 * and re-sends the CRQ initialize command.
 *
 * Return: 0 on success, otherwise an hcall/vio error code.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Retry H_ENABLE_CRQ while the hypervisor reports it is busy;
	 * back off 100ms between attempts (rc==0 skips the first sleep). */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Kick off the CRQ init handshake again on the re-enabled queue. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer containing the TPM command
 * @count:	length of the TPM command
 *
 * Copies the command into the RTCE buffer and submits it via a CRQ
 * request.  The response arrives asynchronously: the CRQ interrupt
 * path clears tpm_processing_cmd and fills the RTCE buffer, which
 * tpm_ibmvtpm_recv() later consumes.
 *
 * Return: 0 (the command was queued), -EIO on size error,
 *	   -EINTR if interrupted while waiting for a previous command.
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	/* Only one command may be in flight; wait until the interrupt
	 * handler clears the flag for the previous one. */
	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
			 "Need to wait for TPM to finish\n");

		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * Set the processing flag before issuing the hcall: the CRQ
	 * response interrupt may fire (and clear it) at any time after
	 * the command is sent.
	 */
	ibmvtpm->tpm_processing_cmd = 1;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * NOTE(review): H_CLOSED presumably means the CRQ was
		 * closed on the hypervisor side (e.g. after a partition
		 * migration) — re-enable it via tpm_ibmvtpm_resume() and
		 * retry the command exactly once.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		ibmvtpm->tpm_processing_cmd = 0;
	}

	spin_unlock(&ibmvtpm->rtce_lock);
	return 0;
}
0259
/* The vTPM protocol offers no way to cancel an in-flight command. */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
}
0264
/**
 * tpm_ibmvtpm_status() - Return the driver's busy status
 * @chip:	tpm chip struct
 *
 * Return: tpm_processing_cmd (non-zero while a command is in flight;
 *	   cleared by the CRQ interrupt handler when the response arrives).
 */
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);

	return ibmvtpm->tpm_processing_cmd;
}
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
0282 {
0283 int rc;
0284
0285 rc = ibmvtpm_send_crq(ibmvtpm->vdev,
0286 IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
0287 if (rc != H_SUCCESS)
0288 dev_err(ibmvtpm->dev,
0289 "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
0290
0291 return rc;
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304 static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
0305 {
0306 int rc;
0307
0308 rc = ibmvtpm_send_crq(ibmvtpm->vdev,
0309 IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
0310 if (rc != H_SUCCESS)
0311 dev_err(ibmvtpm->dev,
0312 "ibmvtpm_crq_get_version failed rc=%d\n", rc);
0313
0314 return rc;
0315 }
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325 static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
0326 {
0327 int rc;
0328
0329 rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
0330 if (rc != H_SUCCESS)
0331 dev_err(ibmvtpm->dev,
0332 "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
0333
0334 return rc;
0335 }
0336
0337
0338
0339
0340
0341
0342
/**
 * tpm_ibmvtpm_remove() - VIO remove callback; tear the device down
 * @vdev:	vio device struct
 *
 * Unregisters the chip, releases the IRQ, frees the CRQ with the
 * hypervisor and releases all DMA mappings and buffers — in that
 * order, so no interrupt or CRQ traffic can touch freed memory.
 */
static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	/* Retry H_FREE_CRQ while busy, backing off 100ms between tries. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* rtce_buf may never have been allocated if the size response
	 * was not received during probe. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);

	dev_set_drvdata(&vdev->dev, NULL);
}
0373
0374
0375
0376
0377
0378
0379
0380
0381 static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
0382 {
0383 struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
0384 struct ibmvtpm_dev *ibmvtpm;
0385
0386
0387
0388
0389
0390
0391 if (chip)
0392 ibmvtpm = dev_get_drvdata(&chip->dev);
0393 else
0394 return CRQ_RES_BUF_SIZE + PAGE_SIZE;
0395
0396 return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
0397 }
0398
0399
0400
0401
0402
0403
0404
0405 static int tpm_ibmvtpm_suspend(struct device *dev)
0406 {
0407 struct tpm_chip *chip = dev_get_drvdata(dev);
0408 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
0409 int rc = 0;
0410
0411 rc = ibmvtpm_send_crq(ibmvtpm->vdev,
0412 IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
0413 if (rc != H_SUCCESS)
0414 dev_err(ibmvtpm->dev,
0415 "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
0416
0417 return rc;
0418 }
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
/**
 * ibmvtpm_reset_crq() - Free and re-register the CRQ with the hypervisor
 * @ibmvtpm:	vtpm device struct
 *
 * Used when H_REG_CRQ reports H_RESOURCE (a stale CRQ is still
 * registered, e.g. after a kexec).  Clears the local queue state
 * before re-registering.
 *
 * Return: result of the H_REG_CRQ hcall.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Retry H_FREE_CRQ while busy, backing off 100ms between tries. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Start with an empty queue at index 0 before re-registering. */
	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
0446
0447 static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
0448 {
0449 return (status == 0);
0450 }
0451
/* TPM core callbacks: a command is complete when the status bit clears. */
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 1,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

/* Power-management hooks used by the VIO driver registration below. */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
0466
0467
0468
0469
0470
0471
0472
0473
/**
 * ibmvtpm_crq_get_next() - Fetch the next valid CRQ response entry
 * @ibmvtpm:	vtpm device struct
 *
 * Checks the entry at the current ring index; if its valid flag has
 * VTPM_MSG_RES set, advances the index (wrapping at num_entry) and
 * returns the entry, otherwise returns NULL.
 *
 * Return: pointer to the valid CRQ entry, or NULL if none is pending.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/* Order the valid-flag read before reads of the entry's
		 * payload by the caller. */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}
0487
0488
0489
0490
0491
0492
0493
0494
0495 static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
0496 struct ibmvtpm_dev *ibmvtpm)
0497 {
0498 int rc = 0;
0499
0500 switch (crq->valid) {
0501 case VALID_INIT_CRQ:
0502 switch (crq->msg) {
0503 case INIT_CRQ_RES:
0504 dev_info(ibmvtpm->dev, "CRQ initialized\n");
0505 rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
0506 if (rc)
0507 dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
0508 return;
0509 case INIT_CRQ_COMP_RES:
0510 dev_info(ibmvtpm->dev,
0511 "CRQ initialization completed\n");
0512 return;
0513 default:
0514 dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
0515 return;
0516 }
0517 case IBMVTPM_VALID_CMD:
0518 switch (crq->msg) {
0519 case VTPM_GET_RTCE_BUFFER_SIZE_RES:
0520 if (be16_to_cpu(crq->len) <= 0) {
0521 dev_err(ibmvtpm->dev, "Invalid rtce size\n");
0522 return;
0523 }
0524 ibmvtpm->rtce_size = be16_to_cpu(crq->len);
0525 ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
0526 GFP_ATOMIC);
0527 if (!ibmvtpm->rtce_buf) {
0528 dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
0529 return;
0530 }
0531
0532 ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
0533 ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
0534 DMA_BIDIRECTIONAL);
0535
0536 if (dma_mapping_error(ibmvtpm->dev,
0537 ibmvtpm->rtce_dma_handle)) {
0538 kfree(ibmvtpm->rtce_buf);
0539 ibmvtpm->rtce_buf = NULL;
0540 dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
0541 }
0542
0543 return;
0544 case VTPM_GET_VERSION_RES:
0545 ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
0546 return;
0547 case VTPM_TPM_COMMAND_RES:
0548
0549 ibmvtpm->res_len = be16_to_cpu(crq->len);
0550 ibmvtpm->tpm_processing_cmd = 0;
0551 wake_up_interruptible(&ibmvtpm->wq);
0552 return;
0553 default:
0554 return;
0555 }
0556 }
0557 return;
0558 }
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
/**
 * ibmvtpm_interrupt() - CRQ interrupt handler
 * @irq:	irq number
 * @vtpm_instance:	vtpm device struct passed to request_irq()
 *
 * Drains all pending CRQ response entries, processing each and
 * waking waiters on crq_queue.wq (e.g. probe waiting for the RTCE
 * buffer), then hands the entry back by clearing its valid flag.
 *
 * Return: IRQ_HANDLED.
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* Process every valid entry currently in the ring. */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
		crq->valid = 0;
		/* Make the valid-flag clear visible only after the entry
		 * has been fully consumed above. */
		smp_wmb();
	}

	return IRQ_HANDLED;
}
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
/**
 * tpm_ibmvtpm_probe() - VIO probe callback; initialize the vTPM
 * @vio_dev:	vio device struct
 * @id:	matched vio_device_id entry
 *
 * Allocates the chip and driver state, sets up the CRQ page and its
 * DMA mapping, registers the CRQ with the hypervisor, installs the
 * interrupt handler, runs the CRQ init/version/RTCE-size handshake,
 * and finally registers the TPM chip.  Errors unwind through the
 * goto labels in reverse order of acquisition.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
		   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	/* One zeroed page backs the CRQ ring. */
	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	init_waitqueue_head(&crq_q->wq);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	/* H_RESOURCE means a stale CRQ is still registered (e.g. after
	 * kexec); free and re-register it. */
	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	/* CRQ handshake: init, then query version and RTCE buffer size.
	 * The responses are handled asynchronously by the interrupt path. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	/* Wait up to 1s for the interrupt path to allocate the RTCE
	 * buffer from the size response. */
	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
				ibmvtpm->rtce_buf != NULL,
				HZ)) {
		rc = -ENODEV;
		dev_err(dev, "CRQ response timed out\n");
		goto init_irq_cleanup;
	}

	/* The "IBM,vtpm20" compatible string identifies a TPM 2.0 device. */
	if (!strcmp(id->compat, "IBM,vtpm20"))
		chip->flags |= TPM_CHIP_FLAG_TPM2;

	rc = tpm_get_timeouts(chip);
	if (rc)
		goto init_irq_cleanup;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		rc = tpm2_get_cc_attrs_tbl(chip);
		if (rc)
			goto init_irq_cleanup;
	}

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}
0720
/* VIO bus driver registration for the vTPM device. */
static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe	 = tpm_ibmvtpm_probe,
	.remove	 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name	 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};
0729
0730
0731
0732
0733
0734
0735
0736
0737
/**
 * ibmvtpm_module_init() - Register this driver with the VIO bus
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}
0742
0743
0744
0745
/* Unregister the driver from the VIO bus on module unload. */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");