cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

goldfish_pipe.c (26659B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int  fd = open("/dev/qemu_pipe",O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char*  msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg)+1) < 0) {
 *       ... could not connect to <pipename> service
 *       close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
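
/*
 * For illustration, the connect snippet above can be wrapped into a small
 * guest-side helper like the one below (a sketch, not part of this driver;
 * the "pingpong" service name is hypothetical, since available services
 * depend entirely on the emulator build):
 *
 *    // Returns an fd connected to the named emulator service, or -1.
 *    // Usage: int fd = open_qemu_pipe("pingpong");
 *    static int open_qemu_pipe(const char *name)
 *    {
 *        int fd = open("/dev/qemu_pipe", O_RDWR);
 *        if (fd < 0)
 *            return -1;
 *        // The service name, including its NUL terminator, must be the
 *        // first thing written on a freshly opened pipe.
 *        if (write(fd, name, strlen(name) + 1) < 0) {
 *            close(fd);
 *            return -1;
 *        }
 *        return fd;
 *    }
 */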

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
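
/*
 * Note on sizing: with MAX_BUFFERS_PER_COMMAND = 336, the structure above
 * occupies 16 + 8 + 336 * 8 + 336 * 4 = 4056 bytes, so it fits in a single
 * 4 KiB page. That matters because each command buffer is handed to the
 * host as one physically contiguous page (see the BUILD_BUG_ON in
 * goldfish_pipe_open() below).
 */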

/* Information about a single signalled pipe */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags the pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 *  and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags the host has signalled,
	 *  - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;

	/* A buffer of pages, too large to fit into a stack frame */
	struct page *pages[MAX_BUFFERS_PER_COMMAND];
};

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/* A magic number to check if this is an instance of this struct */
	void *magic;

	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *                                       in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers the host uses to interact with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;

	struct miscdevice miscdev;
};

static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
				    enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}
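
/*
 * Note the flow in goldfish_pipe_cmd_locked(): the parameters are staged
 * in the shared command buffer, and the single writel() to PIPE_REG_CMD
 * acts as the doorbell. The host is expected to execute the command
 * synchronously while servicing that register write, which is why the
 * status field can be read back immediately after writel() returns.
 */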

static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_pipe_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int goldfish_pin_pages(unsigned long first_page,
			      unsigned long last_page,
			      unsigned int last_page_size,
			      int is_write,
			      struct page *pages[MAX_BUFFERS_PER_COMMAND],
			      unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = pin_user_pages_fast(first_page, requested_pages,
				  !is_write ? FOLL_WRITE : 0,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}
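
/*
 * A worked example of the page math above, assuming 4 KiB pages: for a
 * transfer at user address 0x10000ff0 of 0x20 bytes, address_end is
 * 0x10001010, so first_page = 0x10000000, last_page = 0x10001000 and
 * last_page_size = 0x10. requested_pages then comes out as
 * ((0x10001000 - 0x10000000) >> 12) + 1 = 2: both pages touched by the
 * buffer get pinned, and only 0x10 bytes of the second one are used.
 */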

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
			       int pages_count,
			       unsigned long address,
			       unsigned long address_end,
			       unsigned long first_page,
			       unsigned long last_page,
			       unsigned int iter_last_page_size,
			       int is_write,
			       struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
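
/*
 * Coalescing example: if three pinned pages happen to sit at physical
 * addresses 0x8000, 0x9000 and 0xb000, the first two are physically
 * contiguous and get merged into a single two-page host buffer, while
 * 0xb000 starts a new entry; buffers_count ends up as 2 rather than 3.
 */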

static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = goldfish_pin_pages(first_page, last_page,
					 last_page_size, is_write,
					 pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		mutex_unlock(&pipe->lock);
		return pages_count;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size, is_write,
			   pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	unpin_user_pages_dirty_lock(pipe->pages, pages_count,
				    !is_write && *consumed_size > 0);

	mutex_unlock(&pipe->lock);
	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wake_bit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	goldfish_pipe_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wake_bit, &pipe->flags)) {
		if (wait_event_interruptible(pipe->wake_queue,
					     !test_bit(wake_bit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}
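
/*
 * The ordering in wait_for_host_signal() is what makes it race-free: the
 * wake bit is set *before* PIPE_CMD_WAKE_ON_* is issued, so if the host
 * signals the pipe right away, the interrupt path simply clears the bit,
 * and the wait_event_interruptible() condition is already true by the
 * time we get there; no wake-up can be lost in between.
 */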

static ssize_t goldfish_pipe_read_write(struct file *filp,
					char __user *buffer,
					size_t bufflen,
					int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end, is_write,
					   last_page, last_page_size,
					   &consumed_size, &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}
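
/*
 * To summarize the loop above: a positive status means the host made
 * progress and we retry immediately; zero means EOF; PIPE_ERROR_AGAIN on
 * a blocking fd means "sleep until the host signals, then retry"; any
 * other negative status is converted into an errno, unless some data was
 * already transferred, in which case the partial count wins.
 */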

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	/* cast away the const */
	char __user *no_const_buffer = (char __user *)buffer;

	return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
					/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled ||
		dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
					  struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked(): since we pop from the
		 * front, there is no previous element to unlink, and we
		 * want this path to be as fast as possible so the sleeping
		 * pipe operations are woken sooner.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
{
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe_dev *dev = dev_addr;
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
	return IRQ_HANDLED;
}
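
/*
 * goldfish_interrupt_task() is the threaded half of the IRQ (both halves
 * are registered together via devm_request_threaded_irq() in
 * goldfish_pipe_device_init()). The hard handler below only drains the
 * device's shared buffer into the signalled-pipes list under the
 * spinlock, then returns IRQ_WAKE_THREAD to defer the actual wake-ups to
 * this sleepable context.
 */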

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev);

/*
 * The general idea of the (threaded) interrupt handling:
 *
 *  1. the device raises an interrupt if there's at least one signalled pipe
 *  2. the IRQ handler reads the signalled pipes and their count from the
 *      device
 *  3. the device writes them into a shared buffer and returns the count;
 *      it only resets the IRQ once it has returned all signalled pipes,
 *      otherwise it leaves it raised, so the IRQ handler will be called
 *      again for the next chunk
 *  4. the IRQ handler adds all returned pipes to the device's signalled
 *      pipes list
 *  5. the IRQ handler defers processing the signalled pipes from the list
 *      to a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev->magic != &goldfish_pipe_device_deinit)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_WAKE_THREAD;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array.
		 * Since get_free_pipe_id_locked runs with interrupts disabled,
		 * we don't want to make calls that could lead to sleep.
		 */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/* A helper function to get the instance of goldfish_pipe_dev from file */
static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
{
	struct miscdevice *miscdev = file->private_data;

	return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
}
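
/*
 * This works because the misc core points file->private_data at the
 * struct miscdevice before calling ->open(); goldfish_pipe_open() then
 * repurposes private_data to hold the per-pipe state, so this helper is
 * only valid at open time.
 */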

/**
 *	goldfish_pipe_open - open a channel to the AVD
 *	@inode: inode of device
 *	@file: file struct of opener
 *
 *	Create a new pipe link between the emulator and the user application.
 *	Each new request produces a new pipe.
 *
 *	Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 *	right now so this is fine. A move to 64bit will need to revisit this
 *	addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static void init_miscdevice(struct miscdevice *miscdev)
{
	memset(miscdev, 0, sizeof(*miscdev));

	miscdev->minor = MISC_DYNAMIC_MINOR;
	miscdev->name = "goldfish_pipe";
	miscdev->fops = &goldfish_pipe_fops;
}
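
/*
 * Registering this miscdevice creates /dev/goldfish_pipe. The example at
 * the top of the file opens /dev/qemu_pipe instead; the guest image is
 * expected to make that name resolve to this node (e.g. via a symlink or
 * device-node setup in userspace).
 */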

static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	const unsigned long paddr = __pa(addr);

	writel(upper_32_bits(paddr), porth);
	writel(lower_32_bits(paddr), portl);
}
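
/*
 * The high half is deliberately written before the low half; the device
 * presumably latches the full 64-bit physical address on the low-word
 * write, so this ordering hands it a complete address in one go.
 */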

static int goldfish_pipe_device_init(struct platform_device *pdev,
				     struct goldfish_pipe_dev *dev)
{
	int err;

	err = devm_request_threaded_irq(&pdev->dev, dev->irq,
					goldfish_pipe_interrupt,
					goldfish_interrupt_task,
					IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	init_miscdevice(&dev->miscdev);
	err = misc_register(&dev->miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes) {
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those buffers
	 * needs to be contained in a single physical page. The easiest choice
	 * is to just allocate a page and place the buffers in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	platform_set_drvdata(pdev, dev);
	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	misc_deregister(&dev->miscdev);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct goldfish_pipe_dev *dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->magic = &goldfish_pipe_device_deinit;
	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dev->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: the v1 driver did not report its version, so we write ours
	 *  before reading the device version back: this allows the host
	 *  implementation to detect the old driver (if there was no version
	 *  write before the read).
	 */
	writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	return goldfish_pipe_device_init(pdev, dev);
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);

	goldfish_pipe_device_deinit(pdev, dev);
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL v2");