cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tpm-dev-common.c (6997B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

static struct workqueue_struct *tpm_dev_wq;

static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
				u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (void *)buf;
	ssize_t ret, len;

	ret = tpm2_prepare_space(chip, space, buf, bufsiz);
	/* If the command is not implemented by the TPM, synthesize a
	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
	 */
	if (ret == -EOPNOTSUPP) {
		header->length = cpu_to_be32(sizeof(*header));
		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
						  TSS2_RESMGR_TPM_RC_LAYER);
		ret = sizeof(*header);
	}
	if (ret)
		goto out_rc;

	len = tpm_transmit(chip, buf, bufsiz);
	if (len < 0)
		ret = len;

	if (!ret)
		ret = tpm2_commit_space(chip, space, buf, &len);

out_rc:
	return ret ? ret : len;
}

static void tpm_dev_async_work(struct work_struct *work)
{
	struct file_priv *priv =
			container_of(work, struct file_priv, async_work);
	ssize_t ret;

	mutex_lock(&priv->buffer_mutex);
	priv->command_enqueued = false;
	ret = tpm_try_get_ops(priv->chip);
	if (ret) {
		priv->response_length = ret;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	/*
	 * If ret is > 0 then tpm_dev_transmit returned the size of the
	 * response. If ret is < 0 then tpm_dev_transmit failed and
	 * returned an error code.
	 */
	if (ret != 0) {
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}

static void user_reader_timeout(struct timer_list *t)
{
	struct file_priv *priv = from_timer(priv, t, user_read_timer);

	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
		task_tgid_nr(current));

	schedule_work(&priv->timeout_work);
}

static void tpm_timeout_work(struct work_struct *work)
{
	struct file_priv *priv = container_of(work, struct file_priv,
					      timeout_work);

	mutex_lock(&priv->buffer_mutex);
	priv->response_read = true;
	priv->response_length = 0;
	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}

void tpm_common_open(struct file *file, struct tpm_chip *chip,
		     struct file_priv *priv, struct tpm_space *space)
{
	priv->chip = chip;
	priv->space = space;
	priv->response_read = true;

	mutex_init(&priv->buffer_mutex);
	timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
	INIT_WORK(&priv->timeout_work, tpm_timeout_work);
	INIT_WORK(&priv->async_work, tpm_dev_async_work);
	init_waitqueue_head(&priv->async_wait);
	file->private_data = priv;
}

ssize_t tpm_common_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size = 0;
	int rc;

	mutex_lock(&priv->buffer_mutex);

	if (priv->response_length) {
		priv->response_read = true;

		ret_size = min_t(ssize_t, size, priv->response_length);
		if (ret_size <= 0) {
			priv->response_length = 0;
			goto out;
		}

		rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
		if (rc) {
			memset(priv->data_buffer, 0, TPM_BUFSIZE);
			priv->response_length = 0;
			ret_size = -EFAULT;
		} else {
			memset(priv->data_buffer + *off, 0, ret_size);
			priv->response_length -= ret_size;
			*off += ret_size;
		}
	}

out:
	if (!priv->response_length) {
		*off = 0;
		del_singleshot_timer_sync(&priv->user_read_timer);
		flush_work(&priv->timeout_work);
	}
	mutex_unlock(&priv->buffer_mutex);
	return ret_size;
}

ssize_t tpm_common_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	int ret = 0;

	if (size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	/* Cannot perform a write until the read has cleared either via
	 * tpm_read or a user_read_timer timeout. This also prevents split
	 * buffered writes from blocking here.
	 */
	if ((!priv->response_read && priv->response_length) ||
	    priv->command_enqueued) {
		ret = -EBUSY;
		goto out;
	}

	if (copy_from_user(priv->data_buffer, buf, size)) {
		ret = -EFAULT;
		goto out;
	}

	if (size < 6 ||
	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
		ret = -EINVAL;
		goto out;
	}

	priv->response_length = 0;
	priv->response_read = false;
	*off = 0;

	/*
	 * If in nonblocking mode, schedule an async job to send
	 * the command and return the size.
	 * In case of an error, the error code will be returned in
	 * the subsequent read call.
	 */
	if (file->f_flags & O_NONBLOCK) {
		priv->command_enqueued = true;
		queue_work(tpm_dev_wq, &priv->async_work);
		mutex_unlock(&priv->buffer_mutex);
		return size;
	}

	/* atomic tpm command send and result receive. We only hold the ops
	 * lock during this period so that the tpm can be unregistered even if
	 * the char dev is held open.
	 */
	if (tpm_try_get_ops(priv->chip)) {
		ret = -EPIPE;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	if (ret > 0) {
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
		ret = size;
	}
out:
	mutex_unlock(&priv->buffer_mutex);
	return ret;
}

__poll_t tpm_common_poll(struct file *file, poll_table *wait)
{
	struct file_priv *priv = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &priv->async_wait, wait);
	mutex_lock(&priv->buffer_mutex);

	/*
	 * The response_length indicates whether there is still a response
	 * (or part of it) to be consumed. Partial reads decrease it
	 * by the number of bytes read, and a write resets it to zero.
	 */
	if (priv->response_length)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = EPOLLOUT | EPOLLWRNORM;

	mutex_unlock(&priv->buffer_mutex);
	return mask;
}

/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
	flush_work(&priv->async_work);
	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->timeout_work);
	file->private_data = NULL;
	priv->response_length = 0;
}

int __init tpm_dev_common_init(void)
{
	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);

	return !tpm_dev_wq ? -ENOMEM : 0;
}

void __exit tpm_dev_common_exit(void)
{
	if (tpm_dev_wq) {
		destroy_workqueue(tpm_dev_wq);
		tpm_dev_wq = NULL;
	}
}
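
Usage note (not part of the file above): tpm-dev-common.c provides the shared read/write/poll plumbing behind the TPM character devices, so it is easiest to follow from the user-space side. The sketch below is a minimal, hypothetical client that exercises the nonblocking path: write() lands in tpm_common_write(), which queues tpm_dev_async_work() on tpm_dev_wq and returns immediately; poll() sleeps in tpm_common_poll() until the worker fills the response buffer and response_length becomes nonzero; read() then drains it through tpm_common_read(). The device path, the timeout, and the hand-assembled TPM2_GetRandom command are illustrative assumptions, not taken from this file.

/* Hypothetical user-space client for the interface above: sends a
 * TPM2_GetRandom command in nonblocking mode and waits for the response
 * with poll(). Device path and command bytes are assumptions made for
 * illustration, not derived from tpm-dev-common.c.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TPM2_GetRandom: tag = TPM2_ST_NO_SESSIONS (0x8001), size = 12,
	 * commandCode = 0x0000017B, bytesRequested = 8 (all big-endian).
	 */
	unsigned char cmd[12] = {
		0x80, 0x01,             /* tag */
		0x00, 0x00, 0x00, 0x0c, /* commandSize */
		0x00, 0x00, 0x01, 0x7b, /* TPM_CC_GetRandom */
		0x00, 0x08              /* bytesRequested */
	};
	unsigned char rsp[4096];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/tpmrm0", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Nonblocking write: tpm_common_write() only enqueues the command
	 * on tpm_dev_wq and returns the command size immediately.
	 */
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
		perror("write");
		close(fd);
		return 1;
	}

	/* Block until tpm_common_poll() reports EPOLLIN, i.e. the async
	 * worker has stored a response (or an error code) for us to read.
	 */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) <= 0) {
		fprintf(stderr, "poll timed out or failed\n");
		close(fd);
		return 1;
	}

	n = read(fd, rsp, sizeof(rsp));
	if (n < 0)
		perror("read");
	else
		printf("received %zd response bytes\n", n);

	close(fd);
	return n < 0 ? 1 : 0;
}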