cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

call.c (13712B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_private.h"

#define MAX_ARG_PARAM_COUNT	6

/*
 * How much memory we allocate for each entry. This doesn't have to be a
 * single page, but it makes sense to keep it at least as a multiple of
 * the page size.
 */
#define SHM_ENTRY_SIZE		PAGE_SIZE

/*
 * We need to have a compile time constant to be able to determine the
 * maximum needed size of the bit field.
 */
#define MIN_ARG_SIZE		OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
#define MAX_ARG_COUNT_PER_ENTRY	(SHM_ENTRY_SIZE / MIN_ARG_SIZE)

/*
 * Shared memory for argument structs is cached here. The number of
 * argument structs that can fit is determined at runtime depending on the
 * needed RPC parameter count reported by secure world
 * (optee->rpc_param_count).
 */
struct optee_shm_arg_entry {
	struct list_head list_node;
	struct tee_shm *shm;
	DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
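
/*
 * A rough worked example of the slot math above, assuming a 4 KiB page
 * and the struct optee_msg_arg/struct optee_msg_param layout from
 * optee_msg.h (a 32-byte header plus 32 bytes per parameter):
 *
 *	MIN_ARG_SIZE            = 32 + 6 * 32 = 224 bytes
 *	MAX_ARG_COUNT_PER_ENTRY = 4096 / 224  = 18 slots
 *
 * The runtime slot count can be smaller when secure world asks for
 * extra RPC parameters, see optee_msg_arg_size() below.
 */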

void optee_cq_wait_init(struct optee_call_queue *cq,
			struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tried to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

void optee_cq_wait_for_completion(struct optee_call_queue *cq,
				  struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

void optee_cq_wait_final(struct optee_call_queue *cq,
			 struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we've been completed, another task just finished its call to
	 * secure world. Since yet another thread is now available in
	 * secure world, wake up another eventual waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
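
/*
 * A minimal usage sketch of the three helpers above, loosely modeled on
 * how the SMC ABI code drives them. do_secure_call() is a hypothetical
 * stand-in for the actual call into secure world, and "busy" stands for
 * secure world reporting that no thread was available:
 *
 *	struct optee_call_waiter w;
 *
 *	optee_cq_wait_init(cq, &w);
 *	while (do_secure_call() == busy)
 *		optee_cq_wait_for_completion(cq, &w);
 *	optee_cq_wait_final(cq, &w);
 */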

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
{
	INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
	mutex_init(&optee->shm_arg_cache.mutex);
	optee->shm_arg_cache.flags = flags;
}

void optee_shm_arg_cache_uninit(struct optee *optee)
{
	struct list_head *head = &optee->shm_arg_cache.shm_args;
	struct optee_shm_arg_entry *entry;

	mutex_destroy(&optee->shm_arg_cache.mutex);
	while (!list_empty(head)) {
		entry = list_first_entry(head, struct optee_shm_arg_entry,
					 list_node);
		list_del(&entry->list_node);
		if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
		     MAX_ARG_COUNT_PER_ENTRY) {
			pr_err("Freeing non-free entry\n");
		}
		tee_shm_free(entry->shm);
		kfree(entry);
	}
}

size_t optee_msg_arg_size(size_t rpc_param_count)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);

	if (rpc_param_count)
		sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);

	return sz;
}
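
/*
 * When secure world reports a nonzero RPC parameter count, each cached
 * slot holds two argument structs back to back: the request struct
 * first, then a second struct reserved for RPC parameters. A sketch of
 * one slot (sizes depend on rpc_param_count):
 *
 *	[ optee_msg_arg, 6 params ][ optee_msg_arg, rpc_param_count params ]
 */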

/**
 * optee_get_msg_arg() - Provide shared memory for argument struct
 * @ctx:	Caller TEE context
 * @num_params:	Number of parameters to store
 * @entry_ret:	Entry pointer, needed when freeing the buffer
 * @shm_ret:	Shared memory buffer
 * @offs_ret:	Offset of argument struct in shared memory buffer
 *
 * @returns a pointer to the argument struct in memory, else an ERR_PTR
 */
struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
					size_t num_params,
					struct optee_shm_arg_entry **entry_ret,
					struct tee_shm **shm_ret,
					u_int *offs_ret)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = optee_msg_arg_size(optee->rpc_param_count);
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *ma;
	size_t args_per_entry;
	u_long bit;
	u_int offs;
	void *res;

	if (num_params > MAX_ARG_PARAM_COUNT)
		return ERR_PTR(-EINVAL);

	if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
		args_per_entry = SHM_ENTRY_SIZE / sz;
	else
		args_per_entry = 1;

	mutex_lock(&optee->shm_arg_cache.mutex);
	list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
		bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
		if (bit < args_per_entry)
			goto have_entry;
	}

	/*
	 * No entry was found, let's allocate a new one.
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		res = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
		res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
	else
		res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);

	if (IS_ERR(res)) {
		kfree(entry);
		goto out;
	}
	entry->shm = res;
	list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
	bit = 0;

have_entry:
	offs = bit * sz;
	res = tee_shm_get_va(entry->shm, offs);
	if (IS_ERR(res))
		goto out;
	ma = res;
	set_bit(bit, entry->map);
	memset(ma, 0, sz);
	ma->num_params = num_params;
	*entry_ret = entry;
	*shm_ret = entry->shm;
	*offs_ret = offs;
out:
	mutex_unlock(&optee->shm_arg_cache.mutex);
	return res;
}
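
/*
 * Callers pair optee_get_msg_arg() with optee_free_msg_arg() around a
 * single call to secure world; a minimal sketch (see for instance
 * optee_close_session_helper() below for a real caller):
 *
 *	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
 *	if (IS_ERR(msg_arg))
 *		return PTR_ERR(msg_arg);
 *	msg_arg->cmd = ...;
 *	optee->ops->do_call_with_arg(ctx, shm, offs);
 *	optee_free_msg_arg(ctx, entry, offs);
 */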

/**
 * optee_free_msg_arg() - Free previously obtained shared memory
 * @ctx:	Caller TEE context
 * @entry:	Pointer returned when the shared memory was obtained
 * @offs:	Offset of shared memory buffer to free
 *
 * This function frees the shared memory obtained with optee_get_msg_arg().
 */
void optee_free_msg_arg(struct tee_context *ctx,
			struct optee_shm_arg_entry *entry, u_int offs)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = optee_msg_arg_size(optee->rpc_param_count);
	u_long bit;

	if (offs > SHM_ENTRY_SIZE || offs % sz) {
		pr_err("Invalid offs %u\n", offs);
		return;
	}
	bit = offs / sz;

	mutex_lock(&optee->shm_arg_cache.mutex);

	if (!test_bit(bit, entry->map))
		pr_err("Bit pos %lu is already free\n", bit);
	clear_bit(bit, entry->map);

	mutex_unlock(&optee->shm_arg_cache.mutex);
}

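/*
 * Note on the layout below: opening a session prepends two meta
 * parameters ahead of the caller's parameters, carrying the trusted
 * application UUID (params[0]) and the client login plus client UUID
 * (params[1]). Hence the "+2" when allocating the argument struct.
 */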
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;
	u_int offs;
	int rc;

	/* +2 for the meta parameters added below */
	msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
				    &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
				      arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	optee_free_msg_arg(ctx, entry, offs);

	return rc;
}

int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;
	u_int offs;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee->ops->do_call_with_arg(ctx, shm, offs);

	optee_free_msg_arg(ctx, entry, offs);

	return 0;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	return optee_close_session_helper(ctx, session);
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;
	struct tee_shm *shm;
	u_int offs;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	msg_arg = optee_get_msg_arg(ctx, arg->num_params,
				    &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
				      param);
	if (rc)
		goto out;

	if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee->ops->from_msg_param(optee, param, arg->num_params,
				       msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	optee_free_msg_arg(ctx, entry, offs);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_shm_arg_entry *entry;
	struct optee_msg_arg *msg_arg;
	struct optee_session *sess;
	struct tee_shm *shm;
	u_int offs;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
	if (IS_ERR(msg_arg))
		return PTR_ERR(msg_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee->ops->do_call_with_arg(ctx, shm, offs);

	optee_free_msg_arg(ctx, entry, offs);
	return 0;
}

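/*
 * The remaining helpers check that memory a client wants to register as
 * shared memory is mapped as normal, cached memory; mappings with other
 * memory types (for example device or non-cached memory) cannot safely
 * be shared with secure world.
 */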
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

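/*
 * optee_check_mem_type() verifies that the whole range
 * [start, start + num_pages * PAGE_SIZE) is backed by normal memory.
 * A minimal caller sketch; in the upstream driver the shared-memory
 * registration path performs this check before handing user pages to
 * secure world:
 *
 *	rc = optee_check_mem_type(start, num_pages);
 *	if (rc)
 *		return rc;
 */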
int optee_check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to be registered with OP-TEE as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}