cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ioreq.c (17637B)


// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN_HSM: Handle I/O requests
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Jason Chen CJ <jason.cj.chen@intel.com>
 *	Fengwei Yin <fengwei.yin@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/acrn.h>

#include "acrn_drv.h"

static void ioreq_pause(void);
static void ioreq_resume(void);

static void ioreq_dispatcher(struct work_struct *work);
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);

static inline bool has_pending_request(struct acrn_ioreq_client *client)
{
	return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
}

static inline bool is_destroying(struct acrn_ioreq_client *client)
{
	return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
}

static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
				  struct acrn_io_request *acrn_req)
{
	bool polling_mode;
	int ret = 0;

	polling_mode = acrn_req->completion_polling;
	/* The release barrier makes sure the writes are done before completion */
	smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);

	/*
	 * To fulfill the real-time requirement of several industry scenarios,
	 * such as automotive, ACRN can run in partition mode, in which User
	 * VMs and the Service VM are bound to dedicated CPU cores. Polling
	 * mode of handling the I/O request is introduced to achieve faster
	 * I/O request handling. In polling mode, the hypervisor polls for the
	 * I/O request's completion. Once an I/O request is marked as
	 * ACRN_IOREQ_STATE_COMPLETE, the hypervisor resumes from the polling
	 * point to continue the I/O request flow. Thus, the completion
	 * notification from the HSM is not needed. Note that
	 * completion_polling needs to be read before the I/O request is
	 * marked as ACRN_IOREQ_STATE_COMPLETE to avoid racing with the
	 * hypervisor.
	 */
	if (!polling_mode) {
		ret = hcall_notify_req_finish(vm->vmid, vcpu);
		if (ret < 0)
			dev_err(acrn_dev.this_device,
				"Notify I/O request finished failed!\n");
	}

	return ret;
}

static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
				       u16 vcpu,
				       struct acrn_io_request *acrn_req)
{
	int ret;

	if (vcpu >= client->vm->vcpu_num)
		return -EINVAL;

	clear_bit(vcpu, client->ioreqs_map);
	if (!acrn_req) {
		acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
		acrn_req += vcpu;
	}

	ret = ioreq_complete_request(client->vm, vcpu, acrn_req);

	return ret;
}

int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
{
	int ret = 0;

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (vm->default_client)
		ret = acrn_ioreq_complete_request(vm->default_client,
						  vcpu, NULL);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	return ret;
}

/**
 * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
 * @client:	The ioreq client
 * @type:	Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:	Start address of iorange
 * @end:	End address of iorange
 *
 * Return: 0 on success, <0 on error
 */
int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
			 u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	if (end < start) {
		dev_err(acrn_dev.this_device,
			"Invalid IO range [0x%llx,0x%llx]\n", start, end);
		return -EINVAL;
	}

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->type = type;
	range->start = start;
	range->end = end;

	write_lock_bh(&client->range_lock);
	list_add(&range->list, &client->range_list);
	write_unlock_bh(&client->range_lock);

	return 0;
}
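
/*
 * Illustrative only (not part of the upstream file): a hedged sketch of how a
 * hypothetical in-kernel client could register and later drop a port I/O
 * window with the two helpers above. "client" is assumed to come from
 * acrn_ioreq_client_create(); the 0x3f8-0x3ff window is a made-up value.
 *
 *	int ret;
 *
 *	ret = acrn_ioreq_range_add(client, ACRN_IOREQ_TYPE_PORTIO,
 *				   0x3f8, 0x3ff);
 *	if (ret)
 *		return ret;
 *	...
 *	acrn_ioreq_range_del(client, ACRN_IOREQ_TYPE_PORTIO, 0x3f8, 0x3ff);
 */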

/**
 * acrn_ioreq_range_del() - Delete an iorange monitored by an ioreq client
 * @client:	The ioreq client
 * @type:	Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:	Start address of iorange
 * @end:	End address of iorange
 */
void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
			  u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	write_lock_bh(&client->range_lock);
	list_for_each_entry(range, &client->range_list, list) {
		if (type == range->type &&
		    start == range->start &&
		    end == range->end) {
			list_del(&range->list);
			kfree(range);
			break;
		}
	}
	write_unlock_bh(&client->range_lock);
}

/*
 * ioreq_task() is the execution entity of the handler thread of an I/O
 * client. The handler callback of the I/O client is called within the
 * handler thread.
 */
static int ioreq_task(void *data)
{
	struct acrn_ioreq_client *client = data;
	struct acrn_io_request *req;
	unsigned long *ioreqs_map;
	int vcpu, ret;

	/*
	 * Lockless access to ioreqs_map is safe, because
	 * 1) set_bit() and clear_bit() are atomic operations.
	 * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
	 *	set_bit() - in ioreq_work handler
	 *	Handler callback handles corresponding I/O request
	 *	clear_bit() - in handler thread (including ACRN userspace)
	 *	Mark corresponding I/O request completed
	 *	Loop again if a new I/O request occurs
	 */
	ioreqs_map = client->ioreqs_map;
	while (!kthread_should_stop()) {
		acrn_ioreq_client_wait(client);
		while (has_pending_request(client)) {
			vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
			req = client->vm->ioreq_buf->req_slot + vcpu;
			ret = client->handler(client, req);
			if (ret < 0) {
				dev_err(acrn_dev.this_device,
					"IO handle failure: %d\n", ret);
				break;
			}
			acrn_ioreq_complete_request(client, vcpu, req);
		}
	}

	return 0;
}
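
/*
 * A minimal sketch (an assumption, not part of this file) of what a
 * non-default client's handler callback could look like, matching the call
 * client->handler(client, req) made by ioreq_task() above: it services the
 * acrn_io_request of one vCPU and returns 0, or a negative value which
 * aborts the inner loop. Completion is then done by ioreq_task() through
 * acrn_ioreq_complete_request(). "example_handler" is a hypothetical name.
 *
 *	static int example_handler(struct acrn_ioreq_client *client,
 *				   struct acrn_io_request *req)
 *	{
 *		if (req->type == ACRN_IOREQ_TYPE_PORTIO &&
 *		    req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ)
 *			req->reqs.pio_request.value = 0xff;
 *		return 0;
 *	}
 */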

/*
 * For the non-default I/O clients, give them a chance to complete the
 * current I/O requests if there are any. For the default I/O client, it is
 * safe to clear all pending I/O requests because the clearing request is
 * from ACRN userspace.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	bool has_pending = false;
	unsigned long vcpu;
	int retry = 10;

	/*
	 * IO requests of this VM will be completed directly in
	 * acrn_ioreq_dispatch if ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
	 */
	set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

	/*
	 * acrn_ioreq_request_clear is only called in the VM reset case.
	 * Simply wait 100ms in total for the IO requests' completion.
	 */
	do {
		spin_lock_bh(&vm->ioreq_clients_lock);
		list_for_each_entry(client, &vm->ioreq_clients, list) {
			has_pending = has_pending_request(client);
			if (has_pending)
				break;
		}
		spin_unlock_bh(&vm->ioreq_clients_lock);

		if (has_pending)
			schedule_timeout_interruptible(HZ / 100);
	} while (has_pending && --retry > 0);
	if (retry == 0)
		dev_warn(acrn_dev.this_device,
			 "%s cannot flush pending request!\n", client->name);

	/* Clear all ioreqs belonging to the default client */
	spin_lock_bh(&vm->ioreq_clients_lock);
	client = vm->default_client;
	if (client) {
		vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
		while (vcpu < ACRN_IO_REQUEST_MAX) {
			acrn_ioreq_complete_request(client, vcpu, NULL);
			vcpu = find_next_bit(client->ioreqs_map,
					     ACRN_IO_REQUEST_MAX, vcpu + 1);
		}
	}
	spin_unlock_bh(&vm->ioreq_clients_lock);

	/* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
	clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}

int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
{
	if (client->is_default) {
		/*
		 * In the default client, a user space thread waits on the
		 * waitqueue. The is_destroying() check is used to notify user
		 * space that the client is going to be destroyed.
		 */
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 is_destroying(client));
		if (is_destroying(client))
			return -ENODEV;
	} else {
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 kthread_should_stop());
	}

	return 0;
}

static bool is_cfg_addr(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		(req->reqs.pio_request.address == 0xcf8));
}

static bool is_cfg_data(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		((req->reqs.pio_request.address >= 0xcfc) &&
		 (req->reqs.pio_request.address < (0xcfc + 4))));
}

/* The low 8 bits of a supported pci_reg addr */
#define PCI_LOWREG_MASK  0xFC
/* The high 4 bits of a supported pci_reg addr */
#define PCI_HIGHREG_MASK 0xF00
/* Max number of supported functions */
#define PCI_FUNCMAX	7
/* Max number of supported slots */
#define PCI_SLOTMAX	31
/* Max number of supported buses */
#define PCI_BUSMAX	255
#define CONF1_ENABLE	0x80000000UL
/*
 * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally has the
 * following two steps:
 *   1) write the address into port 0xCF8
 *   2) access the data in/from port 0xCFC
 * This function combines such paired PCI configuration space I/O requests
 * into one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the
 * processing.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
			  struct acrn_io_request *req, u16 vcpu)
{
	int offset, pci_cfg_addr, pci_reg;
	bool is_handled = false;

	if (is_cfg_addr(req)) {
		WARN_ON(req->reqs.pio_request.size != 4);
		if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
			vm->pci_conf_addr = req->reqs.pio_request.value;
		else
			req->reqs.pio_request.value = vm->pci_conf_addr;
		is_handled = true;
	} else if (is_cfg_data(req)) {
		if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
			if (req->reqs.pio_request.direction ==
					ACRN_IOREQ_DIR_READ)
				req->reqs.pio_request.value = 0xffffffff;
			is_handled = true;
		} else {
			offset = req->reqs.pio_request.address - 0xcfc;

			req->type = ACRN_IOREQ_TYPE_PCICFG;
			pci_cfg_addr = vm->pci_conf_addr;
			req->reqs.pci_request.bus =
					(pci_cfg_addr >> 16) & PCI_BUSMAX;
			req->reqs.pci_request.dev =
					(pci_cfg_addr >> 11) & PCI_SLOTMAX;
			req->reqs.pci_request.func =
					(pci_cfg_addr >> 8) & PCI_FUNCMAX;
			pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
				   ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
			req->reqs.pci_request.reg = pci_reg + offset;
		}
	}

	if (is_handled)
		ioreq_complete_request(vm, vcpu, req);

	return is_handled;
}
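
/*
 * Worked example of the decoding above, using illustrative values: a guest
 * writes 0x80001808 to 0xCF8, then reads four bytes from 0xCFC.
 *
 *	CONF1_ENABLE (bit 31) is set, so the access is forwarded;
 *	bus  = (0x80001808 >> 16) & PCI_BUSMAX  = 0
 *	dev  = (0x80001808 >> 11) & PCI_SLOTMAX = 3
 *	func = (0x80001808 >>  8) & PCI_FUNCMAX = 0
 *	reg  = (0x80001808 & PCI_LOWREG_MASK) +
 *	       ((0x80001808 >> 16) & PCI_HIGHREG_MASK) + offset = 0x08
 *
 * The request therefore becomes an ACRN_IOREQ_TYPE_PCICFG access to device
 * 00:03.0, configuration register 0x08.
 */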

static bool in_range(struct acrn_ioreq_range *range,
		     struct acrn_io_request *req)
{
	bool ret = false;

	if (range->type == req->type) {
		switch (req->type) {
		case ACRN_IOREQ_TYPE_MMIO:
			if (req->reqs.mmio_request.address >= range->start &&
			    (req->reqs.mmio_request.address +
			     req->reqs.mmio_request.size - 1) <= range->end)
				ret = true;
			break;
		case ACRN_IOREQ_TYPE_PORTIO:
			if (req->reqs.pio_request.address >= range->start &&
			    (req->reqs.pio_request.address +
			     req->reqs.pio_request.size - 1) <= range->end)
				ret = true;
			break;
		default:
			break;
		}
	}

	return ret;
}

static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
						   struct acrn_io_request *req)
{
	struct acrn_ioreq_client *client, *found = NULL;
	struct acrn_ioreq_range *range;

	lockdep_assert_held(&vm->ioreq_clients_lock);

	list_for_each_entry(client, &vm->ioreq_clients, list) {
		read_lock_bh(&client->range_lock);
		list_for_each_entry(range, &client->range_list, list) {
			if (in_range(range, req)) {
				found = client;
				break;
			}
		}
		read_unlock_bh(&client->range_lock);
		if (found)
			break;
	}
	return found ? found : vm->default_client;
}

/**
 * acrn_ioreq_client_create() - Create an ioreq client
 * @vm:		The VM that this client belongs to
 * @handler:	The ioreq_handler of the ioreq client. acrn_hsm will create a
 *		kernel thread and call the handler to handle I/O requests.
 * @priv:	Private data for the handler
 * @is_default:	If it is the default client
 * @name:	The name of ioreq client
 *
 * Return: acrn_ioreq_client pointer on success, NULL on error
 */
struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
						   ioreq_handler_t handler,
						   void *priv, bool is_default,
						   const char *name)
{
	struct acrn_ioreq_client *client;

	if (!handler && !is_default) {
		dev_dbg(acrn_dev.this_device,
			"Cannot create non-default client w/o handler!\n");
		return NULL;
	}
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->handler = handler;
	client->vm = vm;
	client->priv = priv;
	client->is_default = is_default;
	if (name)
		strncpy(client->name, name, sizeof(client->name) - 1);
	rwlock_init(&client->range_lock);
	INIT_LIST_HEAD(&client->range_list);
	init_waitqueue_head(&client->wq);

	if (client->handler) {
		client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
					     client->vm->vmid, client->name);
		if (IS_ERR(client->thread)) {
			kfree(client);
			return NULL;
		}
	}

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (is_default)
		vm->default_client = client;
	else
		list_add(&client->list, &vm->ioreq_clients);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
	return client;
}
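
/*
 * Hedged lifecycle sketch (hypothetical caller, not part of this driver):
 * create a kernel-mode client with a handler, watch one MMIO window, then
 * tear it down. "example_handler" and the 0xe0000000 window are made-up
 * names/values used only for illustration.
 *
 *	struct acrn_ioreq_client *client;
 *	int ret;
 *
 *	client = acrn_ioreq_client_create(vm, example_handler, NULL, false,
 *					  "example");
 *	if (!client)
 *		return -ENOMEM;
 *	ret = acrn_ioreq_range_add(client, ACRN_IOREQ_TYPE_MMIO,
 *				   0xe0000000, 0xe0000fff);
 *	if (ret) {
 *		acrn_ioreq_client_destroy(client);
 *		return ret;
 *	}
 *	...
 *	acrn_ioreq_client_destroy(client);
 */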

/**
 * acrn_ioreq_client_destroy() - Destroy an ioreq client
 * @client:	The ioreq client
 */
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
	struct acrn_ioreq_range *range, *next;
	struct acrn_vm *vm = client->vm;

	dev_dbg(acrn_dev.this_device,
		"Destroy ioreq client %s.\n", client->name);
	ioreq_pause();
	set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
	if (client->is_default)
		wake_up_interruptible(&client->wq);
	else
		kthread_stop(client->thread);

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (client->is_default)
		vm->default_client = NULL;
	else
		list_del(&client->list);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	write_lock_bh(&client->range_lock);
	list_for_each_entry_safe(range, next, &client->range_list, list) {
		list_del(&range->list);
		kfree(range);
	}
	write_unlock_bh(&client->range_lock);
	kfree(client);

	ioreq_resume();
}

static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	struct acrn_io_request *req;
	int i;

	for (i = 0; i < vm->vcpu_num; i++) {
		req = vm->ioreq_buf->req_slot + i;

		/* Barrier the read of the 'processed' field of acrn_io_request */
		if (smp_load_acquire(&req->processed) ==
				     ACRN_IOREQ_STATE_PENDING) {
			/* Complete the IO request directly in clearing stage */
			if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
				ioreq_complete_request(vm, i, req);
				continue;
			}
			if (handle_cf8cfc(vm, req, i))
				continue;

			spin_lock_bh(&vm->ioreq_clients_lock);
			client = find_ioreq_client(vm, req);
			if (!client) {
				dev_err(acrn_dev.this_device,
					"Failed to find ioreq client!\n");
				spin_unlock_bh(&vm->ioreq_clients_lock);
				return -EINVAL;
			}
			if (!client->is_default)
				req->kernel_handled = 1;
			else
				req->kernel_handled = 0;
			/*
			 * The release barrier makes sure the writes are done
			 * before setting ACRN_IOREQ_STATE_PROCESSING
			 */
			smp_store_release(&req->processed,
					  ACRN_IOREQ_STATE_PROCESSING);
			set_bit(i, client->ioreqs_map);
			wake_up_interruptible(&client->wq);
			spin_unlock_bh(&vm->ioreq_clients_lock);
		}
	}

	return 0;
}
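
/*
 * Summary of the request flow driven by acrn_ioreq_dispatch() above (no new
 * behaviour, just a recap of this file): the hypervisor posts a slot as
 * ACRN_IOREQ_STATE_PENDING; the dispatcher moves it to
 * ACRN_IOREQ_STATE_PROCESSING, sets the vCPU bit in the owning client's
 * ioreqs_map and wakes that client; the client's handler (kernel thread or
 * ACRN userspace) services the request, and ioreq_complete_request() finally
 * marks it ACRN_IOREQ_STATE_COMPLETE. The smp_load_acquire()/
 * smp_store_release() pairs order the payload accesses against these state
 * transitions.
 */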

static void ioreq_dispatcher(struct work_struct *work)
{
	struct acrn_vm *vm;

	read_lock(&acrn_vm_list_lock);
	list_for_each_entry(vm, &acrn_vm_list, list) {
		if (!vm->ioreq_buf)
			break;
		acrn_ioreq_dispatch(vm);
	}
	read_unlock(&acrn_vm_list_lock);
}

static void ioreq_intr_handler(void)
{
	queue_work(ioreq_wq, &ioreq_work);
}

static void ioreq_pause(void)
{
	/* Flush and unarm the handler to ensure no I/O requests are pending */
	acrn_remove_intr_handler();
	drain_workqueue(ioreq_wq);
}

static void ioreq_resume(void)
{
	/* Schedule after enabling in case other clients miss the interrupt */
	acrn_setup_intr_handler(ioreq_intr_handler);
	queue_work(ioreq_wq, &ioreq_work);
}

int acrn_ioreq_intr_setup(void)
{
	acrn_setup_intr_handler(ioreq_intr_handler);
	ioreq_wq = alloc_workqueue("ioreq_wq",
				   WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ioreq_wq) {
		dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
		acrn_remove_intr_handler();
		return -ENOMEM;
	}
	return 0;
}

void acrn_ioreq_intr_remove(void)
{
	if (ioreq_wq)
		destroy_workqueue(ioreq_wq);
	acrn_remove_intr_handler();
}

int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
	struct acrn_ioreq_buffer *set_buffer;
	struct page *page;
	int ret;

	if (vm->ioreq_buf)
		return -EEXIST;

	set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
	if (!set_buffer)
		return -ENOMEM;

	ret = pin_user_pages_fast(buf_vma, 1,
				  FOLL_WRITE | FOLL_LONGTERM, &page);
	if (unlikely(ret != 1) || !page) {
		dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
		ret = -EFAULT;
		goto free_buf;
	}

	vm->ioreq_buf = page_address(page);
	vm->ioreq_page = page;
	set_buffer->ioreq_buf = page_to_phys(page);
	ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
	if (ret < 0) {
		dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
		unpin_user_page(page);
		vm->ioreq_buf = NULL;
		goto free_buf;
	}

	dev_dbg(acrn_dev.this_device,
		"Init ioreq buffer %pK!\n", vm->ioreq_buf);
	ret = 0;
free_buf:
	kfree(set_buffer);
	return ret;
}
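
/*
 * Note on the buffer set up above (a recap of this file, not new behaviour):
 * the single pinned page is shared with the hypervisor and holds one
 * struct acrn_io_request slot per vCPU (vm->ioreq_buf->req_slot[vcpu]);
 * ioreq_task() and acrn_ioreq_dispatch() index it by vCPU id, and its
 * physical address is passed to the hypervisor inside the acrn_ioreq_buffer
 * descriptor via hcall_set_ioreq_buffer().
 */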

void acrn_ioreq_deinit(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client, *next;

	dev_dbg(acrn_dev.this_device,
		"Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
	/* Destroy all clients belonging to this VM */
	list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
		acrn_ioreq_client_destroy(client);
	if (vm->default_client)
		acrn_ioreq_client_destroy(vm->default_client);

	if (vm->ioreq_buf && vm->ioreq_page) {
		unpin_user_page(vm->ioreq_page);
		vm->ioreq_buf = NULL;
	}
}