cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vboxguest_utils.c (24090B)


/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, based on
 * VBoxGuestR0LibHGCMInternal.cpp, VBoxGuestR0LibGenericRequest.cpp and
 * RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)						\
{									\
	unsigned long flags;						\
	va_list args;							\
	int i, count;							\
									\
	va_start(args, fmt);						\
	spin_lock_irqsave(&vbg_log_lock, flags);			\
									\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++)					\
		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
									\
	pr_func("%s", vbg_log_buf);					\
									\
	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
	va_end(args);							\
}									\
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
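
/*
 * Example (editor's sketch, not part of the original file): the wrappers
 * generated above are used like printk(); output goes both to the kernel
 * log and, byte by byte, to the VBox debug port:
 *
 *	vbg_info("vboxguest: driver loaded\n");
 *	vbg_err("vboxguest: request failed, rc %d\n", rc);
 */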

void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
		    u32 requestor)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->requestor = requestor;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
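
/*
 * Example (editor's sketch, not part of the original file): the typical
 * lifecycle of a VMM request. "req_type" and "requestor" are placeholders;
 * real callers allocate the full request structure for the specific
 * VMMDEVREQ_* type being performed, not just the header:
 *
 *	struct vmmdev_request_header *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), req_type, requestor);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	rc = vbg_req_perform(gdev, req);    (a VBox status, not an errno)
 *
 *	vbg_req_free(req, sizeof(*req));
 *	return vbg_status_code_to_errno(rc);
 */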

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT, requestor);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
			u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT,
					requestor);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
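
/*
 * Example (editor's sketch, not part of the original file): connecting to
 * and disconnecting from an HGCM service. The service name and the
 * VMMDEV_HGCM_LOC_LOCALHOST_EXISTING location type follow the vboxsf
 * usage and are given here as an assumption:
 *
 *	struct vmmdev_hgcm_service_location loc = {
 *		.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *		.u.localhost.service_name = "VBoxSharedFolders",
 *	};
 *	u32 client_id;
 *	int vbox_status, ret;
 *
 *	ret = vbg_hgcm_connect(gdev, requestor, &loc, &client_id,
 *			       &vbox_status);
 *	if (ret < 0)
 *		return ret;
 *	if (vbox_status < 0)
 *		return vbg_status_code_to_errno(vbox_status);
 *
 *	(... vbg_hgcm_call() using client_id ...)
 *
 *	vbg_hgcm_disconnect(gdev, requestor, client_id, &vbox_status);
 */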

static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}
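
/*
 * Worked example (editor's note): with 4K pages, a buffer starting at
 * offset 0xf00 within its page with len 0x300 gives
 * PAGE_ALIGN(0x300 + 0xf00) = 0x2000, i.e. 2 pages, even though the
 * buffer itself is smaller than one page. The page-offset term is what
 * accounts for buffers straddling a page boundary.
 */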

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}

/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
 * and figures out how much extra storage is needed for page lists.
 * @src_parm:         Pointer to source function call parameters.
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array.
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 * Return: 0 or negative errno value.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * Translates linear address types to page list direction flags.
 * @type:  The parameter type.
 * Return: page list flags.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
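
/*
 * Illustrative request layout (editor's sketch): the fixed header and the
 * parameter array come first; each LINADDR parameter converted to a page
 * list above stores its vmmdev_hgcm_pagelist in the "extra" space that
 * follows, found again via parm->u.page_list.offset:
 *
 *	+---------------------------+  offset 0
 *	| struct vmmdev_hgcm_call   |
 *	+---------------------------+  sizeof(struct vmmdev_hgcm_call)
 *	| parm[0] .. parm[n-1]      |
 *	+---------------------------+  initial off_extra
 *	| page list for parm[i]     |  <- parm[i].u.page_list.offset
 *	| page list for parm[j]     |
 *	+---------------------------+
 */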

/**
 * Initializes the call request that we're sending to the host.
 * @call:            The call to initialize.
 * @client_id:       The client ID of the caller.
 * @function:        The function number of the function to call.
 * @src_parm:        Pointer to source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}

/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized; that should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

/**
 * Performs the call and waits for its completion.
 * @gdev:           The VBoxGuest device extension.
 * @call:           The call to execute.
 * @timeout_ms:     Timeout in ms, U32_MAX means wait indefinitely.
 * @interruptible:  Whether the wait may be interrupted by signals.
 * @leak_it:        Where to return whether the request must be leaked
 *                  rather than freed, because cancellation of a timed-out
 *                  call failed.
 * Return: 0 or negative errno value.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool interruptible, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	if (interruptible) {
		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
							   hgcm_req_done(gdev, &call->header),
							   timeout);
	} else {
		timeout = wait_event_timeout(gdev->hgcm_wq,
					     hgcm_req_done(gdev, &call->header),
					     timeout);
	}

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel; this should mean that the cancel has lost the
	 * race with normal completion, so wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * @call:            HGCM call request.
 * @dst_parm:        Pointer to function call parameters destination.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 * Return: 0 or negative errno value.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
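
/*
 * Example (editor's sketch, not part of the original file): an HGCM call
 * passing one 32-bit argument and one kernel output buffer. The function
 * number, client_id, requestor mask and buffer come from the service
 * being called and are placeholders here:
 *
 *	struct vmmdev_hgcm_function_parameter parms[2] = {};
 *	int vbox_status, ret;
 *
 *	parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
 *	parms[0].u.value32 = some_flag;
 *	parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
 *	parms[1].u.pointer.size = buf_len;
 *	parms[1].u.pointer.u.linear_addr = (uintptr_t)buf;
 *
 *	ret = vbg_hgcm_call(gdev, requestor, client_id, function,
 *			    U32_MAX, parms, 2, &vbox_status);
 *	if (ret < 0)
 *		return ret;                 (errno-style failure)
 *	if (vbox_status < 0)
 *		return vbg_status_code_to_errno(vbox_status);
 */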

#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
	u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* Keep it simple: allocate a temporary parameter array and convert. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED]                            = -EPERM,
	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
	[-VERR_INTERRUPTED]                              = -EINTR,
	[-VERR_DEV_IO_ERROR]                             = -EIO,
	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
	[-VERR_INVALID_HANDLE]                           = -EBADF,
	[-VERR_TRY_AGAIN]                                = -EAGAIN,
	[-VERR_NO_MEMORY]                                = -ENOMEM,
	[-VERR_INVALID_POINTER]                          = -EFAULT,
	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
	[-VERR_INVALID_NAME]                             = -ENOENT,
	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
	[-VERR_DISK_FULL]                                = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
	[-VERR_WRITE_PROTECT]                            = -EROFS,
	[-VERR_BROKEN_PIPE]                              = -EPIPE,
	[-VERR_DEADLOCK]                                 = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
	[-VERR_NO_MORE_FILES]                            = -ENODATA,
	[-VERR_NO_DATA]                                  = -ENODATA,
	[-VERR_NET_NO_NETWORK]                           = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN]                                 = -ENETDOWN,
	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
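
/*
 * Example (editor's sketch): converting the VBox status of a performed
 * request into an errno for returning to the rest of the kernel:
 *
 *	rc = vbg_req_perform(gdev, req);
 *	if (rc < 0)
 *		return vbg_status_code_to_errno(rc);
 *
 * e.g. VERR_NO_MEMORY becomes -ENOMEM, and unmapped status codes fall
 * back to -EPROTO with a warning.
 */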