cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

ffa_abi.c (23454B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2021, Linaro Limited
      4 */
      5
      6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      7
      8#include <linux/arm_ffa.h>
      9#include <linux/errno.h>
     10#include <linux/scatterlist.h>
     11#include <linux/sched.h>
     12#include <linux/slab.h>
     13#include <linux/string.h>
     14#include <linux/tee_drv.h>
     15#include <linux/types.h>
     16#include "optee_private.h"
     17#include "optee_ffa.h"
     18#include "optee_rpc_cmd.h"
     19
     20/*
     21 * This file implement the FF-A ABI used when communicating with secure world
     22 * OP-TEE OS via FF-A.
     23 * This file is divided into the following sections:
     24 * 1. Maintain a hash table for lookup of a global FF-A memory handle
     25 * 2. Convert between struct tee_param and struct optee_msg_param
     26 * 3. Low level support functions to register shared memory in secure world
     27 * 4. Dynamic shared memory pool based on alloc_pages()
     28 * 5. Do a normal scheduled call into secure world
     29 * 6. Driver initialization.
     30 */
     31
     32/*
     33 * 1. Maintain a hash table for lookup of a global FF-A memory handle
     34 *
     35 * FF-A assigns a global memory handle for each piece shared memory.
     36 * This handle is then used when communicating with secure world.
     37 *
     38 * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
     39 */
     40struct shm_rhash {
     41	struct tee_shm *shm;
     42	u64 global_id;
     43	struct rhash_head linkage;
     44};
     45
     46static void rh_free_fn(void *ptr, void *arg)
     47{
     48	kfree(ptr);
     49}
     50
     51static const struct rhashtable_params shm_rhash_params = {
     52	.head_offset = offsetof(struct shm_rhash, linkage),
     53	.key_len     = sizeof(u64),
     54	.key_offset  = offsetof(struct shm_rhash, global_id),
     55	.automatic_shrinking = true,
     56};
     57
     58static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
     59						 u64 global_id)
     60{
     61	struct tee_shm *shm = NULL;
     62	struct shm_rhash *r;
     63
     64	mutex_lock(&optee->ffa.mutex);
     65	r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
     66				   shm_rhash_params);
     67	if (r)
     68		shm = r->shm;
     69	mutex_unlock(&optee->ffa.mutex);
     70
     71	return shm;
     72}
     73
     74static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
     75				    u64 global_id)
     76{
     77	struct shm_rhash *r;
     78	int rc;
     79
     80	r = kmalloc(sizeof(*r), GFP_KERNEL);
     81	if (!r)
     82		return -ENOMEM;
     83	r->shm = shm;
     84	r->global_id = global_id;
     85
     86	mutex_lock(&optee->ffa.mutex);
     87	rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
     88					   shm_rhash_params);
     89	mutex_unlock(&optee->ffa.mutex);
     90
     91	if (rc)
     92		kfree(r);
     93
     94	return rc;
     95}
     96
     97static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
     98{
     99	struct shm_rhash *r;
    100	int rc = -ENOENT;
    101
    102	mutex_lock(&optee->ffa.mutex);
    103	r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
    104				   shm_rhash_params);
    105	if (r)
    106		rc = rhashtable_remove_fast(&optee->ffa.global_ids,
    107					    &r->linkage, shm_rhash_params);
    108	mutex_unlock(&optee->ffa.mutex);
    109
    110	if (!rc)
    111		kfree(r);
    112
    113	return rc;
    114}
    115
    116/*
    117 * 2. Convert between struct tee_param and struct optee_msg_param
    118 *
    119 * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
    120 * functions.
    121 */
    122
    123static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
    124				   u32 attr, const struct optee_msg_param *mp)
    125{
    126	struct tee_shm *shm = NULL;
    127	u64 offs_high = 0;
    128	u64 offs_low = 0;
    129
    130	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
    131		  attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
    132	p->u.memref.size = mp->u.fmem.size;
    133
    134	if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
    135		shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
    136	p->u.memref.shm = shm;
    137
    138	if (shm) {
    139		offs_low = mp->u.fmem.offs_low;
    140		offs_high = mp->u.fmem.offs_high;
    141	}
    142	p->u.memref.shm_offs = offs_low | offs_high << 32;
    143}
    144
    145/**
    146 * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
    147 *				struct tee_param
    148 * @optee:	main service struct
    149 * @params:	subsystem internal parameter representation
    150 * @num_params:	number of elements in the parameter arrays
    151 * @msg_params:	OPTEE_MSG parameters
    152 *
    153 * Returns 0 on success or <0 on failure
    154 */
    155static int optee_ffa_from_msg_param(struct optee *optee,
    156				    struct tee_param *params, size_t num_params,
    157				    const struct optee_msg_param *msg_params)
    158{
    159	size_t n;
    160
    161	for (n = 0; n < num_params; n++) {
    162		struct tee_param *p = params + n;
    163		const struct optee_msg_param *mp = msg_params + n;
    164		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
    165
    166		switch (attr) {
    167		case OPTEE_MSG_ATTR_TYPE_NONE:
    168			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
    169			memset(&p->u, 0, sizeof(p->u));
    170			break;
    171		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
    172		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
    173		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
    174			optee_from_msg_param_value(p, attr, mp);
    175			break;
    176		case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
    177		case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
    178		case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
    179			from_msg_param_ffa_mem(optee, p, attr, mp);
    180			break;
    181		default:
    182			return -EINVAL;
    183		}
    184	}
    185
    186	return 0;
    187}
    188
    189static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
    190				const struct tee_param *p)
    191{
    192	struct tee_shm *shm = p->u.memref.shm;
    193
    194	mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
    195		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
    196
    197	if (shm) {
    198		u64 shm_offs = p->u.memref.shm_offs;
    199
    200		mp->u.fmem.internal_offs = shm->offset;
    201
    202		mp->u.fmem.offs_low = shm_offs;
    203		mp->u.fmem.offs_high = shm_offs >> 32;
    204		/* Check that the entire offset could be stored. */
    205		if (mp->u.fmem.offs_high != shm_offs >> 32)
    206			return -EINVAL;
    207
    208		mp->u.fmem.global_id = shm->sec_world_id;
    209	} else {
    210		memset(&mp->u, 0, sizeof(mp->u));
    211		mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
    212	}
    213	mp->u.fmem.size = p->u.memref.size;
    214
    215	return 0;
    216}
    217
    218/**
    219 * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
    220 *			      parameters
    221 * @optee:	main service struct
    222 * @msg_params:	OPTEE_MSG parameters
    223 * @num_params:	number of elements in the parameter arrays
    224 * @params:	subsystem itnernal parameter representation
    225 * Returns 0 on success or <0 on failure
    226 */
    227static int optee_ffa_to_msg_param(struct optee *optee,
    228				  struct optee_msg_param *msg_params,
    229				  size_t num_params,
    230				  const struct tee_param *params)
    231{
    232	size_t n;
    233
    234	for (n = 0; n < num_params; n++) {
    235		const struct tee_param *p = params + n;
    236		struct optee_msg_param *mp = msg_params + n;
    237
    238		switch (p->attr) {
    239		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
    240			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
    241			memset(&mp->u, 0, sizeof(mp->u));
    242			break;
    243		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
    244		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
    245		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
    246			optee_to_msg_param_value(mp, p);
    247			break;
    248		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
    249		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
    250		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
    251			if (to_msg_param_ffa_mem(mp, p))
    252				return -EINVAL;
    253			break;
    254		default:
    255			return -EINVAL;
    256		}
    257	}
    258
    259	return 0;
    260}
    261
    262/*
    263 * 3. Low level support functions to register shared memory in secure world
    264 *
    265 * Functions to register and unregister shared memory both for normal
    266 * clients and for tee-supplicant.
    267 */
    268
    269static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
    270				  struct page **pages, size_t num_pages,
    271				  unsigned long start)
    272{
    273	struct optee *optee = tee_get_drvdata(ctx->teedev);
    274	const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
    275	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
    276	struct ffa_mem_region_attributes mem_attr = {
    277		.receiver = ffa_dev->vm_id,
    278		.attrs = FFA_MEM_RW,
    279	};
    280	struct ffa_mem_ops_args args = {
    281		.use_txbuf = true,
    282		.attrs = &mem_attr,
    283		.nattrs = 1,
    284	};
    285	struct sg_table sgt;
    286	int rc;
    287
    288	rc = optee_check_mem_type(start, num_pages);
    289	if (rc)
    290		return rc;
    291
    292	rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
    293				       num_pages * PAGE_SIZE, GFP_KERNEL);
    294	if (rc)
    295		return rc;
    296	args.sg = sgt.sgl;
    297	rc = ffa_ops->memory_share(ffa_dev, &args);
    298	sg_free_table(&sgt);
    299	if (rc)
    300		return rc;
    301
    302	rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
    303	if (rc) {
    304		ffa_ops->memory_reclaim(args.g_handle, 0);
    305		return rc;
    306	}
    307
    308	shm->sec_world_id = args.g_handle;
    309
    310	return 0;
    311}
    312
    313static int optee_ffa_shm_unregister(struct tee_context *ctx,
    314				    struct tee_shm *shm)
    315{
    316	struct optee *optee = tee_get_drvdata(ctx->teedev);
    317	const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
    318	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
    319	u64 global_handle = shm->sec_world_id;
    320	struct ffa_send_direct_data data = {
    321		.data0 = OPTEE_FFA_UNREGISTER_SHM,
    322		.data1 = (u32)global_handle,
    323		.data2 = (u32)(global_handle >> 32)
    324	};
    325	int rc;
    326
    327	optee_shm_rem_ffa_handle(optee, global_handle);
    328	shm->sec_world_id = 0;
    329
    330	rc = ffa_ops->sync_send_receive(ffa_dev, &data);
    331	if (rc)
    332		pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
    333
    334	rc = ffa_ops->memory_reclaim(global_handle, 0);
    335	if (rc)
    336		pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
    337
    338	return rc;
    339}
    340
    341static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
    342					 struct tee_shm *shm)
    343{
    344	struct optee *optee = tee_get_drvdata(ctx->teedev);
    345	const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
    346	u64 global_handle = shm->sec_world_id;
    347	int rc;
    348
    349	/*
    350	 * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
    351	 * since this is OP-TEE freeing via RPC so it has already retired
    352	 * this ID.
    353	 */
    354
    355	optee_shm_rem_ffa_handle(optee, global_handle);
    356	rc = ffa_ops->memory_reclaim(global_handle, 0);
    357	if (rc)
    358		pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
    359
    360	shm->sec_world_id = 0;
    361
    362	return rc;
    363}
    364
    365/*
    366 * 4. Dynamic shared memory pool based on alloc_pages()
    367 *
    368 * Implements an OP-TEE specific shared memory pool.
    369 * The main function is optee_ffa_shm_pool_alloc_pages().
    370 */
    371
    372static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
    373			     struct tee_shm *shm, size_t size, size_t align)
    374{
    375	return optee_pool_op_alloc_helper(pool, shm, size, align,
    376					  optee_ffa_shm_register);
    377}
    378
    379static void pool_ffa_op_free(struct tee_shm_pool *pool,
    380			     struct tee_shm *shm)
    381{
    382	optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
    383}
    384
    385static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
    386{
    387	kfree(pool);
    388}
    389
    390static const struct tee_shm_pool_ops pool_ffa_ops = {
    391	.alloc = pool_ffa_op_alloc,
    392	.free = pool_ffa_op_free,
    393	.destroy_pool = pool_ffa_op_destroy_pool,
    394};
    395
    396/**
    397 * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
    398 *
    399 * This pool is used with OP-TEE over FF-A. In this case command buffers
    400 * and such are allocated from kernel's own memory.
    401 */
    402static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
    403{
    404	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    405
    406	if (!pool)
    407		return ERR_PTR(-ENOMEM);
    408
    409	pool->ops = &pool_ffa_ops;
    410
    411	return pool;
    412}
    413
    414/*
    415 * 5. Do a normal scheduled call into secure world
    416 *
    417 * The function optee_ffa_do_call_with_arg() performs a normal scheduled
    418 * call into secure world. During this call may normal world request help
    419 * from normal world using RPCs, Remote Procedure Calls. This includes
    420 * delivery of non-secure interrupts to for instance allow rescheduling of
    421 * the current task.
    422 */
    423
    424static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
    425					      struct optee *optee,
    426					      struct optee_msg_arg *arg)
    427{
    428	struct tee_shm *shm;
    429
    430	if (arg->num_params != 1 ||
    431	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
    432		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
    433		return;
    434	}
    435
    436	switch (arg->params[0].u.value.a) {
    437	case OPTEE_RPC_SHM_TYPE_APPL:
    438		shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
    439		break;
    440	case OPTEE_RPC_SHM_TYPE_KERNEL:
    441		shm = tee_shm_alloc_priv_buf(optee->ctx,
    442					     arg->params[0].u.value.b);
    443		break;
    444	default:
    445		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
    446		return;
    447	}
    448
    449	if (IS_ERR(shm)) {
    450		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
    451		return;
    452	}
    453
    454	arg->params[0] = (struct optee_msg_param){
    455		.attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
    456		.u.fmem.size = tee_shm_get_size(shm),
    457		.u.fmem.global_id = shm->sec_world_id,
    458		.u.fmem.internal_offs = shm->offset,
    459	};
    460
    461	arg->ret = TEEC_SUCCESS;
    462}
    463
    464static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
    465					     struct optee *optee,
    466					     struct optee_msg_arg *arg)
    467{
    468	struct tee_shm *shm;
    469
    470	if (arg->num_params != 1 ||
    471	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
    472		goto err_bad_param;
    473
    474	shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
    475	if (!shm)
    476		goto err_bad_param;
    477	switch (arg->params[0].u.value.a) {
    478	case OPTEE_RPC_SHM_TYPE_APPL:
    479		optee_rpc_cmd_free_suppl(ctx, shm);
    480		break;
    481	case OPTEE_RPC_SHM_TYPE_KERNEL:
    482		tee_shm_free(shm);
    483		break;
    484	default:
    485		goto err_bad_param;
    486	}
    487	arg->ret = TEEC_SUCCESS;
    488	return;
    489
    490err_bad_param:
    491	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
    492}
    493
    494static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
    495				    struct optee *optee,
    496				    struct optee_msg_arg *arg)
    497{
    498	arg->ret_origin = TEEC_ORIGIN_COMMS;
    499	switch (arg->cmd) {
    500	case OPTEE_RPC_CMD_SHM_ALLOC:
    501		handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
    502		break;
    503	case OPTEE_RPC_CMD_SHM_FREE:
    504		handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
    505		break;
    506	default:
    507		optee_rpc_cmd(ctx, optee, arg);
    508	}
    509}
    510
    511static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
    512				 u32 cmd, struct optee_msg_arg *arg)
    513{
    514	switch (cmd) {
    515	case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
    516		handle_ffa_rpc_func_cmd(ctx, optee, arg);
    517		break;
    518	case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
    519		/* Interrupt delivered by now */
    520		break;
    521	default:
    522		pr_warn("Unknown RPC func 0x%x\n", cmd);
    523		break;
    524	}
    525}
    526
    527static int optee_ffa_yielding_call(struct tee_context *ctx,
    528				   struct ffa_send_direct_data *data,
    529				   struct optee_msg_arg *rpc_arg)
    530{
    531	struct optee *optee = tee_get_drvdata(ctx->teedev);
    532	const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
    533	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
    534	struct optee_call_waiter w;
    535	u32 cmd = data->data0;
    536	u32 w4 = data->data1;
    537	u32 w5 = data->data2;
    538	u32 w6 = data->data3;
    539	int rc;
    540
    541	/* Initialize waiter */
    542	optee_cq_wait_init(&optee->call_queue, &w);
    543	while (true) {
    544		rc = ffa_ops->sync_send_receive(ffa_dev, data);
    545		if (rc)
    546			goto done;
    547
    548		switch ((int)data->data0) {
    549		case TEEC_SUCCESS:
    550			break;
    551		case TEEC_ERROR_BUSY:
    552			if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
    553				rc = -EIO;
    554				goto done;
    555			}
    556
    557			/*
    558			 * Out of threads in secure world, wait for a thread
    559			 * become available.
    560			 */
    561			optee_cq_wait_for_completion(&optee->call_queue, &w);
    562			data->data0 = cmd;
    563			data->data1 = w4;
    564			data->data2 = w5;
    565			data->data3 = w6;
    566			continue;
    567		default:
    568			rc = -EIO;
    569			goto done;
    570		}
    571
    572		if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
    573			goto done;
    574
    575		/*
    576		 * OP-TEE has returned with a RPC request.
    577		 *
    578		 * Note that data->data4 (passed in register w7) is already
    579		 * filled in by ffa_ops->sync_send_receive() returning
    580		 * above.
    581		 */
    582		cond_resched();
    583		optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
    584		cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
    585		data->data0 = cmd;
    586		data->data1 = 0;
    587		data->data2 = 0;
    588		data->data3 = 0;
    589	}
    590done:
    591	/*
    592	 * We're done with our thread in secure world, if there's any
    593	 * thread waiters wake up one.
    594	 */
    595	optee_cq_wait_final(&optee->call_queue, &w);
    596
    597	return rc;
    598}
    599
    600/**
    601 * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
    602 * @ctx:	calling context
    603 * @shm:	shared memory holding the message to pass to secure world
    604 * @offs:	offset of the message in @shm
    605 *
    606 * Does a FF-A call to OP-TEE in secure world and handles eventual resulting
    607 * Remote Procedure Calls (RPC) from OP-TEE.
    608 *
    609 * Returns return code from FF-A, 0 is OK
    610 */
    611
    612static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
    613				      struct tee_shm *shm, u_int offs)
    614{
    615	struct ffa_send_direct_data data = {
    616		.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
    617		.data1 = (u32)shm->sec_world_id,
    618		.data2 = (u32)(shm->sec_world_id >> 32),
    619		.data3 = offs,
    620	};
    621	struct optee_msg_arg *arg;
    622	unsigned int rpc_arg_offs;
    623	struct optee_msg_arg *rpc_arg;
    624
    625	/*
    626	 * The shared memory object has to start on a page when passed as
    627	 * an argument struct. This is also what the shm pool allocator
    628	 * returns, but check this before calling secure world to catch
    629	 * eventual errors early in case something changes.
    630	 */
    631	if (shm->offset)
    632		return -EINVAL;
    633
    634	arg = tee_shm_get_va(shm, offs);
    635	if (IS_ERR(arg))
    636		return PTR_ERR(arg);
    637
    638	rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
    639	rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
    640	if (IS_ERR(rpc_arg))
    641		return PTR_ERR(rpc_arg);
    642
    643	return optee_ffa_yielding_call(ctx, &data, rpc_arg);
    644}
    645
    646/*
    647 * 6. Driver initialization
    648 *
    649 * During driver inititialization is the OP-TEE Secure Partition is probed
    650 * to find out which features it supports so the driver can be initialized
    651 * with a matching configuration.
    652 */
    653
    654static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
    655					const struct ffa_dev_ops *ops)
    656{
    657	struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
    658	int rc;
    659
    660	ops->mode_32bit_set(ffa_dev);
    661
    662	rc = ops->sync_send_receive(ffa_dev, &data);
    663	if (rc) {
    664		pr_err("Unexpected error %d\n", rc);
    665		return false;
    666	}
    667	if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
    668	    data.data1 < OPTEE_FFA_VERSION_MINOR) {
    669		pr_err("Incompatible OP-TEE API version %lu.%lu",
    670		       data.data0, data.data1);
    671		return false;
    672	}
    673
    674	data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
    675	rc = ops->sync_send_receive(ffa_dev, &data);
    676	if (rc) {
    677		pr_err("Unexpected error %d\n", rc);
    678		return false;
    679	}
    680	if (data.data2)
    681		pr_info("revision %lu.%lu (%08lx)",
    682			data.data0, data.data1, data.data2);
    683	else
    684		pr_info("revision %lu.%lu", data.data0, data.data1);
    685
    686	return true;
    687}
    688
    689static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
    690				    const struct ffa_dev_ops *ops,
    691				    u32 *sec_caps,
    692				    unsigned int *rpc_param_count)
    693{
    694	struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
    695	int rc;
    696
    697	rc = ops->sync_send_receive(ffa_dev, &data);
    698	if (rc) {
    699		pr_err("Unexpected error %d", rc);
    700		return false;
    701	}
    702	if (data.data0) {
    703		pr_err("Unexpected exchange error %lu", data.data0);
    704		return false;
    705	}
    706
    707	*rpc_param_count = (u8)data.data1;
    708	*sec_caps = data.data2;
    709
    710	return true;
    711}
    712
    713static void optee_ffa_get_version(struct tee_device *teedev,
    714				  struct tee_ioctl_version_data *vers)
    715{
    716	struct tee_ioctl_version_data v = {
    717		.impl_id = TEE_IMPL_ID_OPTEE,
    718		.impl_caps = TEE_OPTEE_CAP_TZ,
    719		.gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
    720			    TEE_GEN_CAP_MEMREF_NULL,
    721	};
    722
    723	*vers = v;
    724}
    725
    726static int optee_ffa_open(struct tee_context *ctx)
    727{
    728	return optee_open(ctx, true);
    729}
    730
    731static const struct tee_driver_ops optee_ffa_clnt_ops = {
    732	.get_version = optee_ffa_get_version,
    733	.open = optee_ffa_open,
    734	.release = optee_release,
    735	.open_session = optee_open_session,
    736	.close_session = optee_close_session,
    737	.invoke_func = optee_invoke_func,
    738	.cancel_req = optee_cancel_req,
    739	.shm_register = optee_ffa_shm_register,
    740	.shm_unregister = optee_ffa_shm_unregister,
    741};
    742
    743static const struct tee_desc optee_ffa_clnt_desc = {
    744	.name = DRIVER_NAME "-ffa-clnt",
    745	.ops = &optee_ffa_clnt_ops,
    746	.owner = THIS_MODULE,
    747};
    748
    749static const struct tee_driver_ops optee_ffa_supp_ops = {
    750	.get_version = optee_ffa_get_version,
    751	.open = optee_ffa_open,
    752	.release = optee_release_supp,
    753	.supp_recv = optee_supp_recv,
    754	.supp_send = optee_supp_send,
    755	.shm_register = optee_ffa_shm_register, /* same as for clnt ops */
    756	.shm_unregister = optee_ffa_shm_unregister_supp,
    757};
    758
    759static const struct tee_desc optee_ffa_supp_desc = {
    760	.name = DRIVER_NAME "-ffa-supp",
    761	.ops = &optee_ffa_supp_ops,
    762	.owner = THIS_MODULE,
    763	.flags = TEE_DESC_PRIVILEGED,
    764};
    765
    766static const struct optee_ops optee_ffa_ops = {
    767	.do_call_with_arg = optee_ffa_do_call_with_arg,
    768	.to_msg_param = optee_ffa_to_msg_param,
    769	.from_msg_param = optee_ffa_from_msg_param,
    770};
    771
    772static void optee_ffa_remove(struct ffa_device *ffa_dev)
    773{
    774	struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
    775
    776	optee_remove_common(optee);
    777
    778	mutex_destroy(&optee->ffa.mutex);
    779	rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
    780
    781	kfree(optee);
    782}
    783
    784static int optee_ffa_probe(struct ffa_device *ffa_dev)
    785{
    786	const struct ffa_dev_ops *ffa_ops;
    787	unsigned int rpc_param_count;
    788	struct tee_shm_pool *pool;
    789	struct tee_device *teedev;
    790	struct tee_context *ctx;
    791	u32 arg_cache_flags = 0;
    792	struct optee *optee;
    793	u32 sec_caps;
    794	int rc;
    795
    796	ffa_ops = ffa_dev_ops_get(ffa_dev);
    797	if (!ffa_ops) {
    798		pr_warn("failed \"method\" init: ffa\n");
    799		return -ENOENT;
    800	}
    801
    802	if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
    803		return -EINVAL;
    804
    805	if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
    806				     &rpc_param_count))
    807		return -EINVAL;
    808	if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
    809		arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
    810
    811	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
    812	if (!optee)
    813		return -ENOMEM;
    814
    815	pool = optee_ffa_shm_pool_alloc_pages();
    816	if (IS_ERR(pool)) {
    817		rc = PTR_ERR(pool);
    818		goto err_free_optee;
    819	}
    820	optee->pool = pool;
    821
    822	optee->ops = &optee_ffa_ops;
    823	optee->ffa.ffa_dev = ffa_dev;
    824	optee->ffa.ffa_ops = ffa_ops;
    825	optee->rpc_param_count = rpc_param_count;
    826
    827	teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
    828				  optee);
    829	if (IS_ERR(teedev)) {
    830		rc = PTR_ERR(teedev);
    831		goto err_free_pool;
    832	}
    833	optee->teedev = teedev;
    834
    835	teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
    836				  optee);
    837	if (IS_ERR(teedev)) {
    838		rc = PTR_ERR(teedev);
    839		goto err_unreg_teedev;
    840	}
    841	optee->supp_teedev = teedev;
    842
    843	rc = tee_device_register(optee->teedev);
    844	if (rc)
    845		goto err_unreg_supp_teedev;
    846
    847	rc = tee_device_register(optee->supp_teedev);
    848	if (rc)
    849		goto err_unreg_supp_teedev;
    850
    851	rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
    852	if (rc)
    853		goto err_unreg_supp_teedev;
    854	mutex_init(&optee->ffa.mutex);
    855	mutex_init(&optee->call_queue.mutex);
    856	INIT_LIST_HEAD(&optee->call_queue.waiters);
    857	optee_supp_init(&optee->supp);
    858	optee_shm_arg_cache_init(optee, arg_cache_flags);
    859	ffa_dev_set_drvdata(ffa_dev, optee);
    860	ctx = teedev_open(optee->teedev);
    861	if (IS_ERR(ctx)) {
    862		rc = PTR_ERR(ctx);
    863		goto err_rhashtable_free;
    864	}
    865	optee->ctx = ctx;
    866	rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
    867	if (rc)
    868		goto err_close_ctx;
    869
    870	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
    871	if (rc)
    872		goto err_unregister_devices;
    873
    874	pr_info("initialized driver\n");
    875	return 0;
    876
    877err_unregister_devices:
    878	optee_unregister_devices();
    879	optee_notif_uninit(optee);
    880err_close_ctx:
    881	teedev_close_context(ctx);
    882err_rhashtable_free:
    883	rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
    884	optee_supp_uninit(&optee->supp);
    885	mutex_destroy(&optee->call_queue.mutex);
    886	mutex_destroy(&optee->ffa.mutex);
    887err_unreg_supp_teedev:
    888	tee_device_unregister(optee->supp_teedev);
    889err_unreg_teedev:
    890	tee_device_unregister(optee->teedev);
    891err_free_pool:
    892	tee_shm_pool_free(pool);
    893err_free_optee:
    894	kfree(optee);
    895	return rc;
    896}
    897
    898static const struct ffa_device_id optee_ffa_device_id[] = {
    899	/* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */
    900	{ UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
    901		    0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
    902	{}
    903};
    904
    905static struct ffa_driver optee_ffa_driver = {
    906	.name = "optee",
    907	.probe = optee_ffa_probe,
    908	.remove = optee_ffa_remove,
    909	.id_table = optee_ffa_device_id,
    910};
    911
    912int optee_ffa_abi_register(void)
    913{
    914	if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
    915		return ffa_register(&optee_ffa_driver);
    916	else
    917		return -EOPNOTSUPP;
    918}
    919
    920void optee_ffa_abi_unregister(void)
    921{
    922	if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
    923		ffa_unregister(&optee_ffa_driver);
    924}