// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

static struct workqueue_struct *tpm_dev_wq;
static DEFINE_MUTEX(tpm_dev_wq_lock);

/*
 * Prepare the command in @buf for @space, transmit it to the chip and
 * commit the space state. Returns the response length on success or a
 * negative error code on failure.
 */
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
                                u8 *buf, size_t bufsiz)
{
        struct tpm_header *header = (void *)buf;
        ssize_t ret, len;

        ret = tpm2_prepare_space(chip, space, buf, bufsiz);
        /* If the command is not implemented by the TPM, synthesize a
         * response with a TPM2_RC_COMMAND_CODE return for user-space.
         */
        if (ret == -EOPNOTSUPP) {
                header->length = cpu_to_be32(sizeof(*header));
                header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
                header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
                                                  TSS2_RESMGR_TPM_RC_LAYER);
                ret = sizeof(*header);
        }
        if (ret)
                goto out_rc;

        len = tpm_transmit(chip, buf, bufsiz);
        if (len < 0)
                ret = len;

        if (!ret)
                ret = tpm2_commit_space(chip, space, buf, &len);

out_rc:
        return ret ? ret : len;
}
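
/*
 * Worked example of the synthesized error response above (an illustrative
 * sketch; the authoritative constants live in tpm.h): assuming
 * TPM2_ST_NO_SESSIONS = 0x8001, TPM2_RC_COMMAND_CODE = 0x0143 and
 * TSS2_RESMGR_TPM_RC_LAYER = (11 << 16), user space reads back a bare
 * 10-byte header, all fields big-endian:
 *
 *      80 01                   tag:    TPM2_ST_NO_SESSIONS
 *      00 00 00 0a             length: 10 (header only, no payload)
 *      00 0b 01 43             rc:     resmgr layer | TPM2_RC_COMMAND_CODE
 */
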
static void tpm_dev_async_work(struct work_struct *work)
{
        struct file_priv *priv =
                        container_of(work, struct file_priv, async_work);
        ssize_t ret;

        mutex_lock(&priv->buffer_mutex);
        priv->command_enqueued = false;
        ret = tpm_try_get_ops(priv->chip);
        if (ret) {
                priv->response_length = ret;
                goto out;
        }

        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);

        /*
         * If ret is > 0 then tpm_dev_transmit returned the size of the
         * response. If ret is < 0 then tpm_dev_transmit failed and
         * returned an error code.
         */
        if (ret != 0) {
                priv->response_length = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
        }
out:
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

static void user_reader_timeout(struct timer_list *t)
{
        struct file_priv *priv = from_timer(priv, t, user_read_timer);

        pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                task_tgid_nr(current));

        schedule_work(&priv->timeout_work);
}

static void tpm_timeout_work(struct work_struct *work)
{
        struct file_priv *priv = container_of(work, struct file_priv,
                                              timeout_work);

        mutex_lock(&priv->buffer_mutex);
        priv->response_read = true;
        priv->response_length = 0;
        memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

void tpm_common_open(struct file *file, struct tpm_chip *chip,
                     struct file_priv *priv, struct tpm_space *space)
{
        priv->chip = chip;
        priv->space = space;
        priv->response_read = true;

        mutex_init(&priv->buffer_mutex);
        timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
        INIT_WORK(&priv->timeout_work, tpm_timeout_work);
        INIT_WORK(&priv->async_work, tpm_dev_async_work);
        init_waitqueue_head(&priv->async_wait);
        file->private_data = priv;
}

ssize_t tpm_common_read(struct file *file, char __user *buf,
                        size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        ssize_t ret_size = 0;
        int rc;

        mutex_lock(&priv->buffer_mutex);

        if (priv->response_length) {
                priv->response_read = true;

                ret_size = min_t(ssize_t, size, priv->response_length);
                if (ret_size <= 0) {
                        priv->response_length = 0;
                        goto out;
                }

                rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
                if (rc) {
                        memset(priv->data_buffer, 0, TPM_BUFSIZE);
                        priv->response_length = 0;
                        ret_size = -EFAULT;
                } else {
                        memset(priv->data_buffer + *off, 0, ret_size);
                        priv->response_length -= ret_size;
                        *off += ret_size;
                }
        }

out:
        if (!priv->response_length) {
                *off = 0;
                del_singleshot_timer_sync(&priv->user_read_timer);
                flush_work(&priv->timeout_work);
        }
        mutex_unlock(&priv->buffer_mutex);
        return ret_size;
}

ssize_t tpm_common_write(struct file *file, const char __user *buf,
                         size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        int ret = 0;

        if (size > TPM_BUFSIZE)
                return -E2BIG;

        mutex_lock(&priv->buffer_mutex);

        /* Cannot perform a write until the pending response has cleared,
         * either via tpm_common_read() or a user_read_timer timeout. This
         * also prevents split buffered writes from blocking here.
         */
        if ((!priv->response_read && priv->response_length) ||
            priv->command_enqueued) {
                ret = -EBUSY;
                goto out;
        }

        if (copy_from_user(priv->data_buffer, buf, size)) {
                ret = -EFAULT;
                goto out;
        }

        if (size < 6 ||
            size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
                ret = -EINVAL;
                goto out;
        }

        priv->response_length = 0;
        priv->response_read = false;
        *off = 0;

        /*
         * If in nonblocking mode, schedule an async job to send the
         * command and return the size immediately. In case of error,
         * the error code will be returned by the subsequent read call.
         */
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }

        /* Atomic TPM command send and result receive. We only hold the ops
         * lock during this period so that the TPM can be unregistered even
         * if the char dev is held open.
         */
        if (tpm_try_get_ops(priv->chip)) {
                ret = -EPIPE;
                goto out;
        }

        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);

        if (ret > 0) {
                priv->response_length = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
                ret = size;
        }
out:
        mutex_unlock(&priv->buffer_mutex);
        return ret;
}
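
/*
 * Illustration of the length check in tpm_common_write() above. Every TPM
 * command begins with a 10-byte header whose 32-bit big-endian length field
 * sits at offset 2, so a write must carry at least 6 bytes and at least as
 * many bytes as the header claims. Example bytes for a TPM 2.0
 * TPM2_GetRandom command (values shown only as an assumed illustration):
 *
 *      80 01                   tag:         TPM2_ST_NO_SESSIONS
 *      00 00 00 0c             length:      12, the whole command
 *      00 00 01 7b             commandCode: TPM2_CC_GET_RANDOM
 *      00 08                   bytesRequested: 8
 */
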
__poll_t tpm_common_poll(struct file *file, poll_table *wait)
{
        struct file_priv *priv = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &priv->async_wait, wait);
        mutex_lock(&priv->buffer_mutex);

        /*
         * response_length indicates whether there is still a response
         * (or part of one) to be consumed. Partial reads decrease it
         * by the number of bytes read, and a write resets it to zero.
         */
        if (priv->response_length)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;

        mutex_unlock(&priv->buffer_mutex);
        return mask;
}

/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
        flush_work(&priv->async_work);
        del_singleshot_timer_sync(&priv->user_read_timer);
        flush_work(&priv->timeout_work);
        file->private_data = NULL;
        priv->response_length = 0;
}

int __init tpm_dev_common_init(void)
{
        tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);

        return !tpm_dev_wq ? -ENOMEM : 0;
}

void __exit tpm_dev_common_exit(void)
{
        if (tpm_dev_wq) {
                destroy_workqueue(tpm_dev_wq);
                tpm_dev_wq = NULL;
        }
}
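
/*
 * Usage sketch (not part of this file): a minimal nonblocking user-space
 * client of the interface above. The device path and the lack of error
 * handling are assumptions for illustration only; a real client would
 * check every return value and normally go through a TSS library.
 *
 *      int fd = open("/dev/tpmrm0", O_RDWR | O_NONBLOCK);
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
 *                        0x00, 0x00, 0x01, 0x7b, 0x00, 0x08 };
 *      uint8_t rsp[4096];
 *
 *      write(fd, cmd, sizeof(cmd));    // queues async_work, returns at once
 *      poll(&pfd, 1, -1);              // woken via priv->async_wait
 *      ssize_t n = read(fd, rsp, sizeof(rsp)); // full or partial response
 */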