cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

virtio_uml.c (36132B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Virtio vhost-user driver
      4 *
      5 * Copyright(c) 2019 Intel Corporation
      6 *
      7 * This driver allows virtio devices to be used over a vhost-user socket.
      8 *
      9 * Guest devices can be instantiated by kernel module or command line
     10 * parameters. One device will be created for each parameter. Syntax:
     11 *
     12 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
     13 * where:
     14 *		<socket>	:= vhost-user socket path to connect
     15 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
     16 *		<platform_id>	:= (optional) platform device id
     17 *
     18 * example:
     19 *		virtio_uml.device=/var/uml.socket:1
     20 *
     21 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
     22 */
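/*
 * Illustrative addition (not part of the original header): the optional
 * third field selects the platform device id described above, e.g.
 *
 *		virtio_uml.device=/var/uml.socket:1:3
 *
 * creates the same device as the example above, registered as platform
 * device id 3.
 */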
     23#include <linux/module.h>
     24#include <linux/of.h>
     25#include <linux/platform_device.h>
     26#include <linux/slab.h>
     27#include <linux/virtio.h>
     28#include <linux/virtio_config.h>
     29#include <linux/virtio_ring.h>
     30#include <linux/time-internal.h>
     31#include <linux/virtio-uml.h>
     32#include <shared/as-layout.h>
     33#include <irq_kern.h>
     34#include <init.h>
     35#include <os.h>
     36#include "vhost_user.h"
     37
     38#define MAX_SUPPORTED_QUEUE_SIZE	256
     39
     40#define to_virtio_uml_device(_vdev) \
     41	container_of(_vdev, struct virtio_uml_device, vdev)
     42
     43struct virtio_uml_platform_data {
     44	u32 virtio_device_id;
     45	const char *socket_path;
     46	struct work_struct conn_broken_wk;
     47	struct platform_device *pdev;
     48};
     49
     50struct virtio_uml_device {
     51	struct virtio_device vdev;
     52	struct platform_device *pdev;
     53	struct virtio_uml_platform_data *pdata;
     54
     55	spinlock_t sock_lock;
     56	int sock, req_fd, irq;
     57	u64 features;
     58	u64 protocol_features;
     59	u8 status;
     60	u8 registered:1;
     61	u8 suspended:1;
     62	u8 no_vq_suspend:1;
     63
     64	u8 config_changed_irq:1;
     65	uint64_t vq_irq_vq_map;
     66	int recv_rc;
     67};
     68
     69struct virtio_uml_vq_info {
     70	int kick_fd, call_fd;
     71	char name[32];
     72	bool suspended;
     73};
     74
     75extern unsigned long long physmem_size, highmem;
     76
     77#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
     78
     79/* Vhost-user protocol */
     80
     81static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
     82			    const int *fds, unsigned int fds_num)
     83{
     84	int rc;
     85
     86	do {
     87		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
     88		if (rc > 0) {
     89			buf += rc;
     90			len -= rc;
     91			fds = NULL;
     92			fds_num = 0;
     93		}
     94	} while (len && (rc >= 0 || rc == -EINTR));
     95
     96	if (rc < 0)
     97		return rc;
     98	return 0;
     99}
    100
    101static int full_read(int fd, void *buf, int len, bool abortable)
    102{
    103	int rc;
    104
    105	if (!len)
    106		return 0;
    107
    108	do {
    109		rc = os_read_file(fd, buf, len);
    110		if (rc > 0) {
    111			buf += rc;
    112			len -= rc;
    113		}
    114	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
    115
    116	if (rc < 0)
    117		return rc;
    118	if (rc == 0)
    119		return -ECONNRESET;
    120	return 0;
    121}
    122
    123static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
    124{
    125	return full_read(fd, msg, sizeof(msg->header), true);
    126}
    127
    128static int vhost_user_recv(struct virtio_uml_device *vu_dev,
    129			   int fd, struct vhost_user_msg *msg,
    130			   size_t max_payload_size, bool wait)
    131{
    132	size_t size;
    133	int rc;
    134
    135	/*
    136	 * In virtio time-travel mode, we're handling all the vhost-user
    137	 * FDs by polling them whenever appropriate. However, we may get
    138	 * into a situation where we're sending out an interrupt message
    139	 * to a device (e.g. a net device) and need to handle a simulation
    140	 * time message while doing so, e.g. one that tells us to update
    141	 * our idea of how long we can run without scheduling.
    142	 *
    143	 * Thus, we need to not just read() from the given fd, but need
    144	 * to also handle messages for the simulation time - this function
    145	 * does that for us while waiting for the given fd to be readable.
    146	 */
    147	if (wait)
    148		time_travel_wait_readable(fd);
    149
    150	rc = vhost_user_recv_header(fd, msg);
    151
    152	if (rc)
    153		return rc;
    154	size = msg->header.size;
    155	if (size > max_payload_size)
    156		return -EPROTO;
    157	return full_read(fd, &msg->payload, size, false);
    158}
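/*
 * For orientation, a sketch of the wire format parsed above. The real
 * definitions live in "vhost_user.h"; the layout shown follows the
 * vhost-user protocol spec:
 *
 *	header:  u32 request;	// VHOST_USER_* message type
 *		 u32 flags;	// VHOST_USER_VERSION / REPLY / NEED_REPLY
 *		 u32 size;	// number of payload bytes that follow
 *	payload: at most max_payload_size bytes, interpreted per request
 */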
    159
    160static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
    161				   int rc)
    162{
    163	struct virtio_uml_platform_data *pdata = vu_dev->pdata;
    164
    165	if (rc != -ECONNRESET)
    166		return;
    167
    168	if (!vu_dev->registered)
    169		return;
    170
    171	virtio_break_device(&vu_dev->vdev);
    172	schedule_work(&pdata->conn_broken_wk);
    173}
    174
    175static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
    176				struct vhost_user_msg *msg,
    177				size_t max_payload_size)
    178{
    179	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
    180				 max_payload_size, true);
    181
    182	if (rc) {
    183		vhost_user_check_reset(vu_dev, rc);
    184		return rc;
    185	}
    186
    187	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
    188		return -EPROTO;
    189
    190	return 0;
    191}
    192
    193static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
    194			       u64 *value)
    195{
    196	struct vhost_user_msg msg;
    197	int rc = vhost_user_recv_resp(vu_dev, &msg,
    198				      sizeof(msg.payload.integer));
    199
    200	if (rc)
    201		return rc;
    202	if (msg.header.size != sizeof(msg.payload.integer))
    203		return -EPROTO;
    204	*value = msg.payload.integer;
    205	return 0;
    206}
    207
    208static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
    209			       struct vhost_user_msg *msg,
    210			       size_t max_payload_size)
    211{
    212	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
    213				 max_payload_size, false);
    214
    215	if (rc)
    216		return rc;
    217
    218	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
    219			VHOST_USER_VERSION)
    220		return -EPROTO;
    221
    222	return 0;
    223}
    224
    225static int vhost_user_send(struct virtio_uml_device *vu_dev,
    226			   bool need_response, struct vhost_user_msg *msg,
    227			   int *fds, size_t num_fds)
    228{
    229	size_t size = sizeof(msg->header) + msg->header.size;
    230	unsigned long flags;
    231	bool request_ack;
    232	int rc;
    233
    234	msg->header.flags |= VHOST_USER_VERSION;
    235
    236	/*
    237	 * The need_response flag indicates that we already need a response,
    238	 * e.g. to read the features. In these cases, don't request an ACK as
    239	 * it is meaningless. Also request an ACK only if supported.
    240	 */
    241	request_ack = !need_response;
    242	if (!(vu_dev->protocol_features &
    243			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
    244		request_ack = false;
    245
    246	if (request_ack)
    247		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
    248
    249	spin_lock_irqsave(&vu_dev->sock_lock, flags);
    250	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
    251	if (rc < 0)
    252		goto out;
    253
    254	if (request_ack) {
    255		uint64_t status;
    256
    257		rc = vhost_user_recv_u64(vu_dev, &status);
    258		if (rc)
    259			goto out;
    260
    261		if (status) {
    262			vu_err(vu_dev, "slave reports error: %llu\n", status);
    263			rc = -EIO;
    264			goto out;
    265		}
    266	}
    267
    268out:
    269	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
    270	return rc;
    271}
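/*
 * A typical exchange once VHOST_USER_PROTOCOL_F_REPLY_ACK has been
 * negotiated (sketch of the logic above, values illustrative):
 *
 *	->  request  { flags = VERSION | NEED_REPLY, ... }
 *	<-  ack      { flags = VERSION | REPLY, payload.integer = 0 }
 *
 * A non-zero ack value is a slave-side failure and becomes -EIO above;
 * need_response callers skip the ack and read their real reply
 * themselves (e.g. vhost_user_get_features()).
 */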
    272
    273static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
    274				      bool need_response, u32 request)
    275{
    276	struct vhost_user_msg msg = {
    277		.header.request = request,
    278	};
    279
    280	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
    281}
    282
    283static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
    284					 u32 request, int fd)
    285{
    286	struct vhost_user_msg msg = {
    287		.header.request = request,
    288	};
    289
    290	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
    291}
    292
    293static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
    294			       u32 request, u64 value)
    295{
    296	struct vhost_user_msg msg = {
    297		.header.request = request,
    298		.header.size = sizeof(msg.payload.integer),
    299		.payload.integer = value,
    300	};
    301
    302	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
    303}
    304
    305static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
    306{
    307	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
    308}
    309
    310static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
    311				   u64 *features)
    312{
    313	int rc = vhost_user_send_no_payload(vu_dev, true,
    314					    VHOST_USER_GET_FEATURES);
    315
    316	if (rc)
    317		return rc;
    318	return vhost_user_recv_u64(vu_dev, features);
    319}
    320
    321static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
    322				   u64 features)
    323{
    324	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
    325}
    326
    327static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
    328					    u64 *protocol_features)
    329{
    330	int rc = vhost_user_send_no_payload(vu_dev, true,
    331			VHOST_USER_GET_PROTOCOL_FEATURES);
    332
    333	if (rc)
    334		return rc;
    335	return vhost_user_recv_u64(vu_dev, protocol_features);
    336}
    337
    338static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
    339					    u64 protocol_features)
    340{
    341	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
    342				   protocol_features);
    343}
    344
    345static void vhost_user_reply(struct virtio_uml_device *vu_dev,
    346			     struct vhost_user_msg *msg, int response)
    347{
    348	struct vhost_user_msg reply = {
    349		.payload.integer = response,
    350	};
    351	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
    352	int rc;
    353
    354	reply.header = msg->header;
    355	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
    356	reply.header.flags |= VHOST_USER_FLAG_REPLY;
    357	reply.header.size = sizeof(reply.payload.integer);
    358
    359	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
    360
    361	if (rc)
    362		vu_err(vu_dev,
    363		       "sending reply to slave request failed: %d (size %zu)\n",
    364		       rc, size);
    365}
    366
    367static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
    368				       struct time_travel_event *ev)
    369{
    370	struct virtqueue *vq;
    371	int response = 1;
    372	struct {
    373		struct vhost_user_msg msg;
    374		u8 extra_payload[512];
    375	} msg;
    376	int rc;
    377
    378	rc = vhost_user_recv_req(vu_dev, &msg.msg,
    379				 sizeof(msg.msg.payload) +
    380				 sizeof(msg.extra_payload));
    381
    382	vu_dev->recv_rc = rc;
    383	if (rc)
    384		return IRQ_NONE;
    385
    386	switch (msg.msg.header.request) {
    387	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
    388		vu_dev->config_changed_irq = true;
    389		response = 0;
    390		break;
    391	case VHOST_USER_SLAVE_VRING_CALL:
    392		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
    393			if (vq->index == msg.msg.payload.vring_state.index) {
    394				response = 0;
    395				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
    396				break;
    397			}
    398		}
    399		break;
    400	case VHOST_USER_SLAVE_IOTLB_MSG:
    401		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
    402	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
    403		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
    404	default:
    405		vu_err(vu_dev, "unexpected slave request %d\n",
    406		       msg.msg.header.request);
    407	}
    408
    409	if (ev && !vu_dev->suspended)
    410		time_travel_add_irq_event(ev);
    411
    412	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
    413		vhost_user_reply(vu_dev, &msg.msg, response);
    414
    415	return IRQ_HANDLED;
    416}
    417
    418static irqreturn_t vu_req_interrupt(int irq, void *data)
    419{
    420	struct virtio_uml_device *vu_dev = data;
    421	irqreturn_t ret = IRQ_HANDLED;
    422
    423	if (!um_irq_timetravel_handler_used())
    424		ret = vu_req_read_message(vu_dev, NULL);
    425
    426	if (vu_dev->recv_rc) {
    427		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
    428	} else if (vu_dev->vq_irq_vq_map) {
    429		struct virtqueue *vq;
    430
    431		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
    432			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
    433				vring_interrupt(0 /* ignored */, vq);
    434		}
    435		vu_dev->vq_irq_vq_map = 0;
    436	} else if (vu_dev->config_changed_irq) {
    437		virtio_config_changed(&vu_dev->vdev);
    438		vu_dev->config_changed_irq = false;
    439	}
    440
    441	return ret;
    442}
    443
    444static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
    445					  struct time_travel_event *ev)
    446{
    447	vu_req_read_message(data, ev);
    448}
    449
    450static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
    451{
    452	int rc, req_fds[2];
    453
    454	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
    455	rc = os_pipe(req_fds, true, true);
    456	if (rc < 0)
    457		return rc;
    458	vu_dev->req_fd = req_fds[0];
    459
    460	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
    461			       vu_req_interrupt, IRQF_SHARED,
    462			       vu_dev->pdev->name, vu_dev,
    463			       vu_req_interrupt_comm_handler);
    464	if (rc < 0)
    465		goto err_close;
    466
    467	vu_dev->irq = rc;
    468
    469	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
    470					   req_fds[1]);
    471	if (rc)
    472		goto err_free_irq;
    473
    474	goto out;
    475
    476err_free_irq:
    477	um_free_irq(vu_dev->irq, vu_dev);
    478err_close:
    479	os_close_file(req_fds[0]);
    480out:
    481	/* Close unused write end of request fds */
    482	os_close_file(req_fds[1]);
    483	return rc;
    484}
    485
    486static int vhost_user_init(struct virtio_uml_device *vu_dev)
    487{
    488	int rc = vhost_user_set_owner(vu_dev);
    489
    490	if (rc)
    491		return rc;
    492	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
    493	if (rc)
    494		return rc;
    495
    496	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
    497		rc = vhost_user_get_protocol_features(vu_dev,
    498				&vu_dev->protocol_features);
    499		if (rc)
    500			return rc;
    501		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
    502		rc = vhost_user_set_protocol_features(vu_dev,
    503				vu_dev->protocol_features);
    504		if (rc)
    505			return rc;
    506	}
    507
    508	if (vu_dev->protocol_features &
    509			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
    510		rc = vhost_user_init_slave_req(vu_dev);
    511		if (rc)
    512			return rc;
    513	}
    514
    515	return 0;
    516}
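/*
 * The resulting handshake order, summarising the code above:
 *
 *	1. VHOST_USER_SET_OWNER
 *	2. VHOST_USER_GET_FEATURES
 *	3. VHOST_USER_GET_PROTOCOL_FEATURES / SET_PROTOCOL_FEATURES
 *	   (only if VHOST_USER_F_PROTOCOL_FEATURES was offered)
 *	4. VHOST_USER_SET_SLAVE_REQ_FD (only if the slave-request
 *	   channel was negotiated)
 */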
    517
    518static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
    519				  u32 offset, void *buf, u32 len)
    520{
    521	u32 cfg_size = offset + len;
    522	struct vhost_user_msg *msg;
    523	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
    524	size_t msg_size = sizeof(msg->header) + payload_size;
    525	int rc;
    526
    527	if (!(vu_dev->protocol_features &
    528	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
    529		return;
    530
    531	msg = kzalloc(msg_size, GFP_KERNEL);
    532	if (!msg)
    533		return;
    534	msg->header.request = VHOST_USER_GET_CONFIG;
    535	msg->header.size = payload_size;
    536	msg->payload.config.offset = 0;
    537	msg->payload.config.size = cfg_size;
    538
    539	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
    540	if (rc) {
    541		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
    542		       rc);
    543		goto free;
    544	}
    545
    546	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
    547	if (rc) {
    548		vu_err(vu_dev,
    549		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
    550		       rc);
    551		goto free;
    552	}
    553
    554	if (msg->header.size != payload_size ||
    555	    msg->payload.config.size != cfg_size) {
    556		rc = -EPROTO;
    557		vu_err(vu_dev,
    558		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
    559		       msg->header.size, payload_size,
    560		       msg->payload.config.size, cfg_size);
    561		goto free;
    562	}
    563	memcpy(buf, msg->payload.config.payload + offset, len);
    564
    565free:
    566	kfree(msg);
    567}
    568
    569static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
    570				  u32 offset, const void *buf, u32 len)
    571{
    572	struct vhost_user_msg *msg;
    573	size_t payload_size = sizeof(msg->payload.config) + len;
    574	size_t msg_size = sizeof(msg->header) + payload_size;
    575	int rc;
    576
    577	if (!(vu_dev->protocol_features &
    578	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
    579		return;
    580
    581	msg = kzalloc(msg_size, GFP_KERNEL);
    582	if (!msg)
    583		return;
    584	msg->header.request = VHOST_USER_SET_CONFIG;
    585	msg->header.size = payload_size;
    586	msg->payload.config.offset = offset;
    587	msg->payload.config.size = len;
    588	memcpy(msg->payload.config.payload, buf, len);
    589
    590	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
    591	if (rc)
    592		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
    593		       rc);
    594
    595	kfree(msg);
    596}
    597
    598static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
    599				      struct vhost_user_mem_region *region_out)
    600{
    601	unsigned long long mem_offset;
    602	int rc = phys_mapping(addr, &mem_offset);
    603
    604	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
    605		return -EFAULT;
    606	*fd_out = rc;
    607	region_out->guest_addr = addr;
    608	region_out->user_addr = addr;
    609	region_out->size = size;
    610	region_out->mmap_offset = mem_offset;
    611
    612	/* Ensure mapping is valid for the entire region */
    613	rc = phys_mapping(addr + size - 1, &mem_offset);
    614	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
    615		 addr + size - 1, rc, *fd_out))
    616		return -EFAULT;
    617	return 0;
    618}
    619
    620static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
    621{
    622	struct vhost_user_msg msg = {
    623		.header.request = VHOST_USER_SET_MEM_TABLE,
    624		.header.size = sizeof(msg.payload.mem_regions),
    625		.payload.mem_regions.num = 1,
    626	};
    627	unsigned long reserved = uml_reserved - uml_physmem;
    628	int fds[2];
    629	int rc;
    630
    631	/*
    632	 * This is a bit tricky, see also the comment with setup_physmem().
    633	 *
    634	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
    635	 * but the code and data we *already* have is omitted. To us, this
     636	 * makes no difference, since they both become part of our address
    637	 * space and memory consumption. To somebody looking in from the
    638	 * outside, however, it is different because the part of our memory
    639	 * consumption that's already part of the binary (code/data) is not
    640	 * mapped from the file, so it's not visible to another mmap from
    641	 * the file descriptor.
    642	 *
    643	 * Thus, don't advertise this space to the vhost-user slave. This
    644	 * means that the slave will likely abort or similar when we give
    645	 * it an address from the hidden range, since it's not marked as
    646	 * a valid address, but at least that way we detect the issue and
    647	 * don't just have the slave read an all-zeroes buffer from the
    648	 * shared memory file, or write something there that we can never
    649	 * see (depending on the direction of the virtqueue traffic.)
    650	 *
    651	 * Since we usually don't want to use .text for virtio buffers,
    652	 * this effectively means that you cannot use
    653	 *  1) global variables, which are in the .bss and not in the shm
    654	 *     file-backed memory
    655	 *  2) the stack in some processes, depending on where they have
    656	 *     their stack (or maybe only no interrupt stack?)
    657	 *
    658	 * The stack is already not typically valid for DMA, so this isn't
    659	 * much of a restriction, but global variables might be encountered.
    660	 *
    661	 * It might be possible to fix it by copying around the data that's
    662	 * between bss_start and where we map the file now, but it's not
    663	 * something that you typically encounter with virtio drivers, so
    664	 * it didn't seem worthwhile.
    665	 */
    666	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
    667					&fds[0],
    668					&msg.payload.mem_regions.regions[0]);
    669
    670	if (rc < 0)
    671		return rc;
    672	if (highmem) {
    673		msg.payload.mem_regions.num++;
    674		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
    675				&fds[1], &msg.payload.mem_regions.regions[1]);
    676		if (rc < 0)
    677			return rc;
    678	}
    679
    680	return vhost_user_send(vu_dev, false, &msg, fds,
    681			       msg.payload.mem_regions.num);
    682}
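/*
 * In symbols, the single region advertised above (plus one more when
 * highmem is in use) is:
 *
 *	guest_addr = user_addr = uml_reserved - uml_physmem
 *	size = physmem_size - (uml_reserved - uml_physmem)
 *	mmap_offset = offset reported by phys_mapping()
 *
 * so the code/data below uml_reserved stays deliberately invisible to
 * the slave, as the long comment above explains.
 */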
    683
    684static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
    685				      u32 request, u32 index, u32 num)
    686{
    687	struct vhost_user_msg msg = {
    688		.header.request = request,
    689		.header.size = sizeof(msg.payload.vring_state),
    690		.payload.vring_state.index = index,
    691		.payload.vring_state.num = num,
    692	};
    693
    694	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
    695}
    696
    697static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
    698				    u32 index, u32 num)
    699{
    700	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
    701					  index, num);
    702}
    703
    704static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
    705				     u32 index, u32 offset)
    706{
    707	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
    708					  index, offset);
    709}
    710
    711static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
    712				     u32 index, u64 desc, u64 used, u64 avail,
    713				     u64 log)
    714{
    715	struct vhost_user_msg msg = {
    716		.header.request = VHOST_USER_SET_VRING_ADDR,
    717		.header.size = sizeof(msg.payload.vring_addr),
    718		.payload.vring_addr.index = index,
    719		.payload.vring_addr.desc = desc,
    720		.payload.vring_addr.used = used,
    721		.payload.vring_addr.avail = avail,
    722		.payload.vring_addr.log = log,
    723	};
    724
    725	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
    726}
    727
    728static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
    729				   u32 request, int index, int fd)
    730{
    731	struct vhost_user_msg msg = {
    732		.header.request = request,
    733		.header.size = sizeof(msg.payload.integer),
    734		.payload.integer = index,
    735	};
    736
    737	if (index & ~VHOST_USER_VRING_INDEX_MASK)
    738		return -EINVAL;
    739	if (fd < 0) {
    740		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
    741		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
    742	}
    743	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
    744}
    745
    746static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
    747				     int index, int fd)
    748{
    749	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
    750				       index, fd);
    751}
    752
    753static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
    754				     int index, int fd)
    755{
    756	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
    757				       index, fd);
    758}
    759
    760static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
    761				       u32 index, bool enable)
    762{
    763	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
    764		return 0;
    765
    766	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
    767					  index, enable);
    768}
    769
    770
    771/* Virtio interface */
    772
    773static bool vu_notify(struct virtqueue *vq)
    774{
    775	struct virtio_uml_vq_info *info = vq->priv;
    776	const uint64_t n = 1;
    777	int rc;
    778
    779	if (info->suspended)
    780		return true;
    781
    782	time_travel_propagate_time();
    783
    784	if (info->kick_fd < 0) {
    785		struct virtio_uml_device *vu_dev;
    786
    787		vu_dev = to_virtio_uml_device(vq->vdev);
    788
    789		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
    790						  vq->index, 0) == 0;
    791	}
    792
    793	do {
    794		rc = os_write_file(info->kick_fd, &n, sizeof(n));
    795	} while (rc == -EINTR);
    796	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
    797}
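/*
 * Note on the two kick flavours above: with in-band notifications
 * (kick_fd < 0) the kick travels as a VHOST_USER_VRING_KICK message on
 * the control socket; otherwise it is the usual eventfd-style write of
 * an 8-byte counter value of 1 to the kick fd.
 */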
    798
    799static irqreturn_t vu_interrupt(int irq, void *opaque)
    800{
    801	struct virtqueue *vq = opaque;
    802	struct virtio_uml_vq_info *info = vq->priv;
    803	uint64_t n;
    804	int rc;
    805	irqreturn_t ret = IRQ_NONE;
    806
    807	do {
    808		rc = os_read_file(info->call_fd, &n, sizeof(n));
    809		if (rc == sizeof(n))
    810			ret |= vring_interrupt(irq, vq);
    811	} while (rc == sizeof(n) || rc == -EINTR);
    812	WARN(rc != -EAGAIN, "read returned %d\n", rc);
    813	return ret;
    814}
    815
    816
    817static void vu_get(struct virtio_device *vdev, unsigned offset,
    818		   void *buf, unsigned len)
    819{
    820	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    821
    822	vhost_user_get_config(vu_dev, offset, buf, len);
    823}
    824
    825static void vu_set(struct virtio_device *vdev, unsigned offset,
    826		   const void *buf, unsigned len)
    827{
    828	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    829
    830	vhost_user_set_config(vu_dev, offset, buf, len);
    831}
    832
    833static u8 vu_get_status(struct virtio_device *vdev)
    834{
    835	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    836
    837	return vu_dev->status;
    838}
    839
    840static void vu_set_status(struct virtio_device *vdev, u8 status)
    841{
    842	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    843
    844	vu_dev->status = status;
    845}
    846
    847static void vu_reset(struct virtio_device *vdev)
    848{
    849	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    850
    851	vu_dev->status = 0;
    852}
    853
    854static void vu_del_vq(struct virtqueue *vq)
    855{
    856	struct virtio_uml_vq_info *info = vq->priv;
    857
    858	if (info->call_fd >= 0) {
    859		struct virtio_uml_device *vu_dev;
    860
    861		vu_dev = to_virtio_uml_device(vq->vdev);
    862
    863		um_free_irq(vu_dev->irq, vq);
    864		os_close_file(info->call_fd);
    865	}
    866
    867	if (info->kick_fd >= 0)
    868		os_close_file(info->kick_fd);
    869
    870	vring_del_virtqueue(vq);
    871	kfree(info);
    872}
    873
    874static void vu_del_vqs(struct virtio_device *vdev)
    875{
    876	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    877	struct virtqueue *vq, *n;
    878	u64 features;
    879
     880	/* Note: reverse order as a workaround for a decoding bug in snabb */
    881	list_for_each_entry_reverse(vq, &vdev->vqs, list)
    882		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));
    883
    884	/* Ensure previous messages have been processed */
    885	WARN_ON(vhost_user_get_features(vu_dev, &features));
    886
    887	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
    888		vu_del_vq(vq);
    889}
    890
    891static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
    892			       struct virtqueue *vq)
    893{
    894	struct virtio_uml_vq_info *info = vq->priv;
    895	int call_fds[2];
    896	int rc;
    897
    898	/* no call FD needed/desired in this case */
    899	if (vu_dev->protocol_features &
    900			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
    901	    vu_dev->protocol_features &
    902			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
    903		info->call_fd = -1;
    904		return 0;
    905	}
    906
    907	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
    908	rc = os_pipe(call_fds, true, true);
    909	if (rc < 0)
    910		return rc;
    911
    912	info->call_fd = call_fds[0];
    913	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
    914			    vu_interrupt, IRQF_SHARED, info->name, vq);
    915	if (rc < 0)
    916		goto close_both;
    917
    918	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
    919	if (rc)
    920		goto release_irq;
    921
    922	goto out;
    923
    924release_irq:
    925	um_free_irq(vu_dev->irq, vq);
    926close_both:
    927	os_close_file(call_fds[0]);
    928out:
    929	/* Close (unused) write end of call fds */
    930	os_close_file(call_fds[1]);
    931
    932	return rc;
    933}
    934
    935static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
    936				     unsigned index, vq_callback_t *callback,
    937				     const char *name, bool ctx)
    938{
    939	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
    940	struct platform_device *pdev = vu_dev->pdev;
    941	struct virtio_uml_vq_info *info;
    942	struct virtqueue *vq;
    943	int num = MAX_SUPPORTED_QUEUE_SIZE;
    944	int rc;
    945
    946	info = kzalloc(sizeof(*info), GFP_KERNEL);
    947	if (!info) {
    948		rc = -ENOMEM;
    949		goto error_kzalloc;
    950	}
    951	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
    952		 pdev->id, name);
    953
    954	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
    955				    ctx, vu_notify, callback, info->name);
    956	if (!vq) {
    957		rc = -ENOMEM;
    958		goto error_create;
    959	}
    960	vq->priv = info;
    961	num = virtqueue_get_vring_size(vq);
    962
    963	if (vu_dev->protocol_features &
    964			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
    965		info->kick_fd = -1;
    966	} else {
    967		rc = os_eventfd(0, 0);
    968		if (rc < 0)
    969			goto error_kick;
    970		info->kick_fd = rc;
    971	}
    972
    973	rc = vu_setup_vq_call_fd(vu_dev, vq);
    974	if (rc)
    975		goto error_call;
    976
    977	rc = vhost_user_set_vring_num(vu_dev, index, num);
    978	if (rc)
    979		goto error_setup;
    980
    981	rc = vhost_user_set_vring_base(vu_dev, index, 0);
    982	if (rc)
    983		goto error_setup;
    984
    985	rc = vhost_user_set_vring_addr(vu_dev, index,
    986				       virtqueue_get_desc_addr(vq),
    987				       virtqueue_get_used_addr(vq),
    988				       virtqueue_get_avail_addr(vq),
    989				       (u64) -1);
    990	if (rc)
    991		goto error_setup;
    992
    993	return vq;
    994
    995error_setup:
    996	if (info->call_fd >= 0) {
    997		um_free_irq(vu_dev->irq, vq);
    998		os_close_file(info->call_fd);
    999	}
   1000error_call:
   1001	if (info->kick_fd >= 0)
   1002		os_close_file(info->kick_fd);
   1003error_kick:
   1004	vring_del_virtqueue(vq);
   1005error_create:
   1006	kfree(info);
   1007error_kzalloc:
   1008	return ERR_PTR(rc);
   1009}
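/*
 * Per-queue bring-up therefore sends, in order: SET_VRING_NUM,
 * SET_VRING_BASE (0) and SET_VRING_ADDR. vu_find_vqs() below follows
 * up with SET_VRING_KICK (unless in-band notifications are in use) and
 * SET_VRING_ENABLE once every queue exists.
 */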
   1010
   1011static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
   1012		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
   1013		       const char * const names[], const bool *ctx,
   1014		       struct irq_affinity *desc)
   1015{
   1016	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1017	int i, queue_idx = 0, rc;
   1018	struct virtqueue *vq;
   1019
   1020	/* not supported for now */
   1021	if (WARN_ON(nvqs > 64))
   1022		return -EINVAL;
   1023
   1024	rc = vhost_user_set_mem_table(vu_dev);
   1025	if (rc)
   1026		return rc;
   1027
   1028	for (i = 0; i < nvqs; ++i) {
   1029		if (!names[i]) {
   1030			vqs[i] = NULL;
   1031			continue;
   1032		}
   1033
   1034		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
   1035				     ctx ? ctx[i] : false);
   1036		if (IS_ERR(vqs[i])) {
   1037			rc = PTR_ERR(vqs[i]);
   1038			goto error_setup;
   1039		}
   1040	}
   1041
   1042	list_for_each_entry(vq, &vdev->vqs, list) {
   1043		struct virtio_uml_vq_info *info = vq->priv;
   1044
   1045		if (info->kick_fd >= 0) {
   1046			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
   1047						       info->kick_fd);
   1048			if (rc)
   1049				goto error_setup;
   1050		}
   1051
   1052		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
   1053		if (rc)
   1054			goto error_setup;
   1055	}
   1056
   1057	return 0;
   1058
   1059error_setup:
   1060	vu_del_vqs(vdev);
   1061	return rc;
   1062}
   1063
   1064static u64 vu_get_features(struct virtio_device *vdev)
   1065{
   1066	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1067
   1068	return vu_dev->features;
   1069}
   1070
   1071static int vu_finalize_features(struct virtio_device *vdev)
   1072{
   1073	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1074	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;
   1075
   1076	vring_transport_features(vdev);
   1077	vu_dev->features = vdev->features | supported;
   1078
   1079	return vhost_user_set_features(vu_dev, vu_dev->features);
   1080}
   1081
   1082static const char *vu_bus_name(struct virtio_device *vdev)
   1083{
   1084	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1085
   1086	return vu_dev->pdev->name;
   1087}
   1088
   1089static const struct virtio_config_ops virtio_uml_config_ops = {
   1090	.get = vu_get,
   1091	.set = vu_set,
   1092	.get_status = vu_get_status,
   1093	.set_status = vu_set_status,
   1094	.reset = vu_reset,
   1095	.find_vqs = vu_find_vqs,
   1096	.del_vqs = vu_del_vqs,
   1097	.get_features = vu_get_features,
   1098	.finalize_features = vu_finalize_features,
   1099	.bus_name = vu_bus_name,
   1100};
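/*
 * Rough mapping from virtio core operations to vhost-user traffic, as
 * implemented by the handlers above (not exhaustive):
 *
 *	virtio_cread()/cwrite()	-> vu_get()/vu_set() -> GET/SET_CONFIG
 *	finalize_features	-> SET_FEATURES
 *	find_vqs		-> SET_MEM_TABLE plus per-queue setup
 *	virtqueue kick		-> eventfd write or VRING_KICK message
 */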
   1101
   1102static void virtio_uml_release_dev(struct device *d)
   1103{
   1104	struct virtio_device *vdev =
   1105			container_of(d, struct virtio_device, dev);
   1106	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1107
   1108	time_travel_propagate_time();
   1109
   1110	/* might not have been opened due to not negotiating the feature */
   1111	if (vu_dev->req_fd >= 0) {
   1112		um_free_irq(vu_dev->irq, vu_dev);
   1113		os_close_file(vu_dev->req_fd);
   1114	}
   1115
   1116	os_close_file(vu_dev->sock);
   1117	kfree(vu_dev);
   1118}
   1119
   1120void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
   1121				  bool no_vq_suspend)
   1122{
   1123	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
   1124
   1125	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
   1126		return;
   1127
   1128	vu_dev->no_vq_suspend = no_vq_suspend;
   1129	dev_info(&vdev->dev, "%sabled VQ suspend\n",
   1130		 no_vq_suspend ? "dis" : "en");
   1131}
   1132
   1133static void vu_of_conn_broken(struct work_struct *wk)
   1134{
   1135	/*
    1136	 * We can't remove the device from the devicetree, so the only thing we
   1137	 * can do is warn.
   1138	 */
   1139	WARN_ON(1);
   1140}
   1141
   1142/* Platform device */
   1143
   1144static struct virtio_uml_platform_data *
   1145virtio_uml_create_pdata(struct platform_device *pdev)
   1146{
   1147	struct device_node *np = pdev->dev.of_node;
   1148	struct virtio_uml_platform_data *pdata;
   1149	int ret;
   1150
   1151	if (!np)
   1152		return ERR_PTR(-EINVAL);
   1153
   1154	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
   1155	if (!pdata)
   1156		return ERR_PTR(-ENOMEM);
   1157
   1158	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
   1159	pdata->pdev = pdev;
   1160
   1161	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
   1162	if (ret)
   1163		return ERR_PTR(ret);
   1164
   1165	ret = of_property_read_u32(np, "virtio-device-id",
   1166				   &pdata->virtio_device_id);
   1167	if (ret)
   1168		return ERR_PTR(ret);
   1169
   1170	return pdata;
   1171}
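/*
 * A minimal devicetree node that the above parses (sketch; the
 * compatible string matches virtio_uml_match below, the property names
 * come from the of_property_read_*() calls above, and the socket path
 * is hypothetical):
 *
 *	virtio {
 *		compatible = "virtio,uml";
 *		socket-path = "/tmp/vhost.sock";
 *		virtio-device-id = <1>;
 *	};
 */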
   1172
   1173static int virtio_uml_probe(struct platform_device *pdev)
   1174{
   1175	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
   1176	struct virtio_uml_device *vu_dev;
   1177	int rc;
   1178
   1179	if (!pdata) {
   1180		pdata = virtio_uml_create_pdata(pdev);
   1181		if (IS_ERR(pdata))
   1182			return PTR_ERR(pdata);
   1183	}
   1184
   1185	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
   1186	if (!vu_dev)
   1187		return -ENOMEM;
   1188
   1189	vu_dev->pdata = pdata;
   1190	vu_dev->vdev.dev.parent = &pdev->dev;
   1191	vu_dev->vdev.dev.release = virtio_uml_release_dev;
   1192	vu_dev->vdev.config = &virtio_uml_config_ops;
   1193	vu_dev->vdev.id.device = pdata->virtio_device_id;
   1194	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
   1195	vu_dev->pdev = pdev;
   1196	vu_dev->req_fd = -1;
   1197
   1198	time_travel_propagate_time();
   1199
   1200	do {
   1201		rc = os_connect_socket(pdata->socket_path);
   1202	} while (rc == -EINTR);
   1203	if (rc < 0)
   1204		goto error_free;
   1205	vu_dev->sock = rc;
   1206
   1207	spin_lock_init(&vu_dev->sock_lock);
   1208
   1209	rc = vhost_user_init(vu_dev);
   1210	if (rc)
   1211		goto error_init;
   1212
   1213	platform_set_drvdata(pdev, vu_dev);
   1214
   1215	device_set_wakeup_capable(&vu_dev->vdev.dev, true);
   1216
	rc = register_virtio_device(&vu_dev->vdev);
	if (rc) {
		/* the release function frees vu_dev, don't touch it after this */
		put_device(&vu_dev->vdev.dev);
		return rc;
	}
	vu_dev->registered = 1;
	return rc;
   1222
   1223error_init:
   1224	os_close_file(vu_dev->sock);
   1225error_free:
   1226	kfree(vu_dev);
   1227	return rc;
   1228}
   1229
   1230static int virtio_uml_remove(struct platform_device *pdev)
   1231{
   1232	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
   1233
   1234	unregister_virtio_device(&vu_dev->vdev);
   1235	return 0;
   1236}
   1237
   1238/* Command line device list */
   1239
   1240static void vu_cmdline_release_dev(struct device *d)
   1241{
   1242}
   1243
   1244static struct device vu_cmdline_parent = {
   1245	.init_name = "virtio-uml-cmdline",
   1246	.release = vu_cmdline_release_dev,
   1247};
   1248
   1249static bool vu_cmdline_parent_registered;
   1250static int vu_cmdline_id;
   1251
   1252static int vu_unregister_cmdline_device(struct device *dev, void *data)
   1253{
   1254	struct platform_device *pdev = to_platform_device(dev);
   1255	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
   1256
   1257	kfree(pdata->socket_path);
   1258	platform_device_unregister(pdev);
   1259	return 0;
   1260}
   1261
   1262static void vu_conn_broken(struct work_struct *wk)
   1263{
   1264	struct virtio_uml_platform_data *pdata;
   1265
   1266	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
   1267	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
   1268}
   1269
   1270static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
   1271{
   1272	const char *ids = strchr(device, ':');
   1273	unsigned int virtio_device_id;
   1274	int processed, consumed, err;
   1275	char *socket_path;
   1276	struct virtio_uml_platform_data pdata, *ppdata;
   1277	struct platform_device *pdev;
   1278
   1279	if (!ids || ids == device)
   1280		return -EINVAL;
   1281
   1282	processed = sscanf(ids, ":%u%n:%d%n",
   1283			   &virtio_device_id, &consumed,
   1284			   &vu_cmdline_id, &consumed);
   1285
   1286	if (processed < 1 || ids[consumed])
   1287		return -EINVAL;
   1288
   1289	if (!vu_cmdline_parent_registered) {
   1290		err = device_register(&vu_cmdline_parent);
   1291		if (err) {
   1292			pr_err("Failed to register parent device!\n");
   1293			put_device(&vu_cmdline_parent);
   1294			return err;
   1295		}
   1296		vu_cmdline_parent_registered = true;
   1297	}
   1298
   1299	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
   1300	if (!socket_path)
   1301		return -ENOMEM;
   1302
   1303	pdata.virtio_device_id = (u32) virtio_device_id;
   1304	pdata.socket_path = socket_path;
   1305
   1306	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
   1307		vu_cmdline_id, virtio_device_id, socket_path);
   1308
   1309	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
   1310					     vu_cmdline_id++, &pdata,
   1311					     sizeof(pdata));
   1312	err = PTR_ERR_OR_ZERO(pdev);
   1313	if (err)
   1314		goto free;
   1315
   1316	ppdata = pdev->dev.platform_data;
   1317	ppdata->pdev = pdev;
   1318	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
   1319
   1320	return 0;
   1321
   1322free:
   1323	kfree(socket_path);
   1324	return err;
   1325}
   1326
   1327static int vu_cmdline_get_device(struct device *dev, void *data)
   1328{
   1329	struct platform_device *pdev = to_platform_device(dev);
   1330	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
   1331	char *buffer = data;
   1332	unsigned int len = strlen(buffer);
   1333
   1334	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
   1335		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
   1336	return 0;
   1337}
   1338
   1339static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
   1340{
   1341	buffer[0] = '\0';
   1342	if (vu_cmdline_parent_registered)
   1343		device_for_each_child(&vu_cmdline_parent, buffer,
   1344				      vu_cmdline_get_device);
   1345	return strlen(buffer) + 1;
   1346}
   1347
   1348static const struct kernel_param_ops vu_cmdline_param_ops = {
   1349	.set = vu_cmdline_set,
   1350	.get = vu_cmdline_get,
   1351};
   1352
   1353device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
   1354__uml_help(vu_cmdline_param_ops,
   1355"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
   1356"    Configure a virtio device over a vhost-user socket.\n"
   1357"    See virtio_ids.h for a list of possible virtio device id values.\n"
   1358"    Optionally use a specific platform_device id.\n\n"
   1359);
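/*
 * Worked example (hypothetical socket path): booting with
 *
 *	virtio_uml.device=/tmp/vhost-net.sock:1
 *
 * registers platform device "virtio-uml.0", whose probe connects to the
 * vhost-user slave on that socket and instantiates a virtio device with
 * id 1 (a net device, per virtio_ids.h).
 */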
   1360
   1361
   1362static void vu_unregister_cmdline_devices(void)
   1363{
   1364	if (vu_cmdline_parent_registered) {
   1365		device_for_each_child(&vu_cmdline_parent, NULL,
   1366				      vu_unregister_cmdline_device);
   1367		device_unregister(&vu_cmdline_parent);
   1368		vu_cmdline_parent_registered = false;
   1369	}
   1370}
   1371
   1372/* Platform driver */
   1373
   1374static const struct of_device_id virtio_uml_match[] = {
   1375	{ .compatible = "virtio,uml", },
   1376	{ }
   1377};
   1378MODULE_DEVICE_TABLE(of, virtio_uml_match);
   1379
   1380static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
   1381{
   1382	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
   1383
   1384	if (!vu_dev->no_vq_suspend) {
   1385		struct virtqueue *vq;
   1386
   1387		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
   1388			struct virtio_uml_vq_info *info = vq->priv;
   1389
   1390			info->suspended = true;
   1391			vhost_user_set_vring_enable(vu_dev, vq->index, false);
   1392		}
   1393	}
   1394
   1395	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
   1396		vu_dev->suspended = true;
   1397		return 0;
   1398	}
   1399
   1400	return irq_set_irq_wake(vu_dev->irq, 1);
   1401}
   1402
   1403static int virtio_uml_resume(struct platform_device *pdev)
   1404{
   1405	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
   1406
   1407	if (!vu_dev->no_vq_suspend) {
   1408		struct virtqueue *vq;
   1409
   1410		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
   1411			struct virtio_uml_vq_info *info = vq->priv;
   1412
   1413			info->suspended = false;
   1414			vhost_user_set_vring_enable(vu_dev, vq->index, true);
   1415		}
   1416	}
   1417
   1418	vu_dev->suspended = false;
   1419
   1420	if (!device_may_wakeup(&vu_dev->vdev.dev))
   1421		return 0;
   1422
   1423	return irq_set_irq_wake(vu_dev->irq, 0);
   1424}
   1425
   1426static struct platform_driver virtio_uml_driver = {
   1427	.probe = virtio_uml_probe,
   1428	.remove = virtio_uml_remove,
   1429	.driver = {
   1430		.name = "virtio-uml",
   1431		.of_match_table = virtio_uml_match,
   1432	},
   1433	.suspend = virtio_uml_suspend,
   1434	.resume = virtio_uml_resume,
   1435};
   1436
   1437static int __init virtio_uml_init(void)
   1438{
   1439	return platform_driver_register(&virtio_uml_driver);
   1440}
   1441
   1442static void __exit virtio_uml_exit(void)
   1443{
   1444	platform_driver_unregister(&virtio_uml_driver);
   1445	vu_unregister_cmdline_devices();
   1446}
   1447
   1448module_init(virtio_uml_init);
   1449module_exit(virtio_uml_exit);
   1450__uml_exitcall(virtio_uml_exit);
   1451
   1452MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
   1453MODULE_LICENSE("GPL");