Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2004 IBM Corporation
0004  * Authors:
0005  * Leendert van Doorn <leendert@watson.ibm.com>
0006  * Dave Safford <safford@watson.ibm.com>
0007  * Reiner Sailer <sailer@watson.ibm.com>
0008  * Kylene Hall <kjhall@us.ibm.com>
0009  *
0010  * Copyright (C) 2013 Obsidian Research Corp
0011  * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
0012  *
0013  * Device file system interface to the TPM
0014  */
0015 #include <linux/poll.h>
0016 #include <linux/slab.h>
0017 #include <linux/uaccess.h>
0018 #include <linux/workqueue.h>
0019 #include "tpm.h"
0020 #include "tpm-dev.h"
0021 
/* Workqueue that runs async_work for commands written with O_NONBLOCK. */
static struct workqueue_struct *tpm_dev_wq;
0023 
/*
 * Transmit the fully formed TPM command in @buf and leave the response in
 * the same buffer.
 *
 * tpm2_prepare_space() maps the space's session/object handles into the
 * command.  If it reports the command as not implemented (-EOPNOTSUPP), a
 * bare TPM2_RC_COMMAND_CODE error response (tagged with the resource-manager
 * RC layer) is synthesized into @buf so user space sees a TPM-level error
 * instead of a failed syscall.
 *
 * Returns the number of response bytes placed in @buf, or a negative errno.
 */
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
                u8 *buf, size_t bufsiz)
{
    struct tpm_header *header = (void *)buf;
    ssize_t ret, len;

    ret = tpm2_prepare_space(chip, space, buf, bufsiz);
    /* If the command is not implemented by the TPM, synthesize a
     * response with a TPM2_RC_COMMAND_CODE return for user-space.
     */
    if (ret == -EOPNOTSUPP) {
        header->length = cpu_to_be32(sizeof(*header));
        header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
        header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
                          TSS2_RESMGR_TPM_RC_LAYER);
        ret = sizeof(*header);
    }
    /* Non-zero ret is either a prepare error or the synthesized header
     * size; either way skip transmission.  len is never read on this
     * path (ret != 0 short-circuits the final ternary below).
     */
    if (ret)
        goto out_rc;

    len = tpm_transmit(chip, buf, bufsiz);
    if (len < 0)
        ret = len;

    /* Commit the space state only after a successful transmit. */
    if (!ret)
        ret = tpm2_commit_space(chip, space, buf, &len);

out_rc:
    return ret ? ret : len;
}
0054 
0055 static void tpm_dev_async_work(struct work_struct *work)
0056 {
0057     struct file_priv *priv =
0058             container_of(work, struct file_priv, async_work);
0059     ssize_t ret;
0060 
0061     mutex_lock(&priv->buffer_mutex);
0062     priv->command_enqueued = false;
0063     ret = tpm_try_get_ops(priv->chip);
0064     if (ret) {
0065         priv->response_length = ret;
0066         goto out;
0067     }
0068 
0069     ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
0070                    sizeof(priv->data_buffer));
0071     tpm_put_ops(priv->chip);
0072 
0073     /*
0074      * If ret is > 0 then tpm_dev_transmit returned the size of the
0075      * response. If ret is < 0 then tpm_dev_transmit failed and
0076      * returned an error code.
0077      */
0078     if (ret != 0) {
0079         priv->response_length = ret;
0080         mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
0081     }
0082 out:
0083     mutex_unlock(&priv->buffer_mutex);
0084     wake_up_interruptible(&priv->async_wait);
0085 }
0086 
0087 static void user_reader_timeout(struct timer_list *t)
0088 {
0089     struct file_priv *priv = from_timer(priv, t, user_read_timer);
0090 
0091     pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
0092         task_tgid_nr(current));
0093 
0094     schedule_work(&priv->timeout_work);
0095 }
0096 
0097 static void tpm_timeout_work(struct work_struct *work)
0098 {
0099     struct file_priv *priv = container_of(work, struct file_priv,
0100                           timeout_work);
0101 
0102     mutex_lock(&priv->buffer_mutex);
0103     priv->response_read = true;
0104     priv->response_length = 0;
0105     memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
0106     mutex_unlock(&priv->buffer_mutex);
0107     wake_up_interruptible(&priv->async_wait);
0108 }
0109 
0110 void tpm_common_open(struct file *file, struct tpm_chip *chip,
0111              struct file_priv *priv, struct tpm_space *space)
0112 {
0113     priv->chip = chip;
0114     priv->space = space;
0115     priv->response_read = true;
0116 
0117     mutex_init(&priv->buffer_mutex);
0118     timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
0119     INIT_WORK(&priv->timeout_work, tpm_timeout_work);
0120     INIT_WORK(&priv->async_work, tpm_dev_async_work);
0121     init_waitqueue_head(&priv->async_wait);
0122     file->private_data = priv;
0123 }
0124 
/*
 * Common read() handler for the TPM character devices.
 *
 * Copies (part of) the pending response from priv->data_buffer to user
 * space.  Partial reads are supported: *off tracks the position inside the
 * buffer and response_length counts the bytes still unread.  Bytes that
 * have been handed out (or exposed by a failed copy) are zeroed so response
 * data does not linger in the buffer.
 *
 * Returns the number of bytes copied, 0 when nothing is pending, a negative
 * errno stashed by the async worker, or -EFAULT on a bad user buffer.
 */
ssize_t tpm_common_read(struct file *file, char __user *buf,
            size_t size, loff_t *off)
{
    struct file_priv *priv = file->private_data;
    ssize_t ret_size = 0;
    int rc;

    mutex_lock(&priv->buffer_mutex);

    if (priv->response_length) {
        priv->response_read = true;

        /*
         * response_length may hold a negative errno stored by
         * tpm_dev_async_work; min_t() then yields that errno and the
         * branch below clears the state and returns it to the caller.
         */
        ret_size = min_t(ssize_t, size, priv->response_length);
        if (ret_size <= 0) {
            priv->response_length = 0;
            goto out;
        }

        rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
        if (rc) {
            /* Don't leave a half-exposed response around. */
            memset(priv->data_buffer, 0, TPM_BUFSIZE);
            priv->response_length = 0;
            ret_size = -EFAULT;
        } else {
            /* Zero only the consumed part; keep the remainder. */
            memset(priv->data_buffer + *off, 0, ret_size);
            priv->response_length -= ret_size;
            *off += ret_size;
        }
    }

out:
    if (!priv->response_length) {
        /* Response fully consumed: rewind and cancel the timeout. */
        *off = 0;
        del_singleshot_timer_sync(&priv->user_read_timer);
        flush_work(&priv->timeout_work);
    }
    mutex_unlock(&priv->buffer_mutex);
    return ret_size;
}
0164 
/*
 * Common write() handler for the TPM character devices.
 *
 * Validates and latches one complete TPM command into priv->data_buffer.
 * With O_NONBLOCK the command is queued on tpm_dev_wq and the result is
 * picked up by a later read(); otherwise the command is transmitted
 * synchronously while holding the chip's ops lock.
 *
 * Returns @size on success, or -E2BIG / -EBUSY / -EFAULT / -EINVAL /
 * -EPIPE for the respective failure.
 */
ssize_t tpm_common_write(struct file *file, const char __user *buf,
             size_t size, loff_t *off)
{
    struct file_priv *priv = file->private_data;
    int ret = 0;

    if (size > TPM_BUFSIZE)
        return -E2BIG;

    mutex_lock(&priv->buffer_mutex);

    /* Cannot perform a write until the read has cleared either via
     * tpm_read or a user_read_timer timeout. This also prevents split
     * buffered writes from blocking here.
     */
    if ((!priv->response_read && priv->response_length) ||
        priv->command_enqueued) {
        ret = -EBUSY;
        goto out;
    }

    if (copy_from_user(priv->data_buffer, buf, size)) {
        ret = -EFAULT;
        goto out;
    }

    /* Reject anything shorter than a TPM header (the big-endian length
     * field sits at offset 2) and writes that don't cover the full
     * command length the header declares.
     */
    if (size < 6 ||
        size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
        ret = -EINVAL;
        goto out;
    }

    priv->response_length = 0;
    priv->response_read = false;
    *off = 0;

    /*
     * If in nonblocking mode, schedule an async job to send the command
     * and return the write size immediately.
     * In case of error the error code will be returned by
     * the subsequent read call.
     */
    if (file->f_flags & O_NONBLOCK) {
        priv->command_enqueued = true;
        queue_work(tpm_dev_wq, &priv->async_work);
        mutex_unlock(&priv->buffer_mutex);
        return size;
    }

    /* atomic tpm command send and result receive. We only hold the ops
     * lock during this period so that the tpm can be unregistered even if
     * the char dev is held open.
     */
    if (tpm_try_get_ops(priv->chip)) {
        ret = -EPIPE;
        goto out;
    }

    ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                   sizeof(priv->data_buffer));
    tpm_put_ops(priv->chip);

    /* A positive return is the response size: arm the read timeout and
     * report the whole write as consumed.
     */
    if (ret > 0) {
        priv->response_length = ret;
        mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
        ret = size;
    }
out:
    mutex_unlock(&priv->buffer_mutex);
    return ret;
}
0236 
0237 __poll_t tpm_common_poll(struct file *file, poll_table *wait)
0238 {
0239     struct file_priv *priv = file->private_data;
0240     __poll_t mask = 0;
0241 
0242     poll_wait(file, &priv->async_wait, wait);
0243     mutex_lock(&priv->buffer_mutex);
0244 
0245     /*
0246      * The response_length indicates if there is still response
0247      * (or part of it) to be consumed. Partial reads decrease it
0248      * by the number of bytes read, and write resets it the zero.
0249      */
0250     if (priv->response_length)
0251         mask = EPOLLIN | EPOLLRDNORM;
0252     else
0253         mask = EPOLLOUT | EPOLLWRNORM;
0254 
0255     mutex_unlock(&priv->buffer_mutex);
0256     return mask;
0257 }
0258 
0259 /*
0260  * Called on file close
0261  */
/*
 * Called on file close: quiesce every asynchronous path that may still
 * touch @priv before the caller frees it.  The ordering matters.
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
    /* Let any queued O_NONBLOCK transmit finish first. */
    flush_work(&priv->async_work);
    /* Then stop the read timer and drain any timeout work it queued. */
    del_singleshot_timer_sync(&priv->user_read_timer);
    flush_work(&priv->timeout_work);
    file->private_data = NULL;
    priv->response_length = 0;
}
0270 
0271 int __init tpm_dev_common_init(void)
0272 {
0273     tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
0274 
0275     return !tpm_dev_wq ? -ENOMEM : 0;
0276 }
0277 
0278 void __exit tpm_dev_common_exit(void)
0279 {
0280     if (tpm_dev_wq) {
0281         destroy_workqueue(tpm_dev_wq);
0282         tpm_dev_wq = NULL;
0283     }
0284 }