cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

f_fs.c (92537B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * f_fs.c -- user mode file system API for USB composite function controllers
      4 *
      5 * Copyright (C) 2010 Samsung Electronics
      6 * Author: Michal Nazarewicz <mina86@mina86.com>
      7 *
      8 * Based on inode.c (GadgetFS) which was:
      9 * Copyright (C) 2003-2004 David Brownell
     10 * Copyright (C) 2003 Agilent Technologies
     11 */
     12
     13
     14/* #define DEBUG */
     15/* #define VERBOSE_DEBUG */
     16
     17#include <linux/blkdev.h>
     18#include <linux/pagemap.h>
     19#include <linux/export.h>
     20#include <linux/fs_parser.h>
     21#include <linux/hid.h>
     22#include <linux/mm.h>
     23#include <linux/module.h>
     24#include <linux/scatterlist.h>
     25#include <linux/sched/signal.h>
     26#include <linux/uio.h>
     27#include <linux/vmalloc.h>
     28#include <asm/unaligned.h>
     29
     30#include <linux/usb/ccid.h>
     31#include <linux/usb/composite.h>
     32#include <linux/usb/functionfs.h>
     33
     34#include <linux/aio.h>
     35#include <linux/kthread.h>
     36#include <linux/poll.h>
     37#include <linux/eventfd.h>
     38
     39#include "u_fs.h"
     40#include "u_f.h"
     41#include "u_os_desc.h"
     42#include "configfs.h"
     43
      44#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
     45
     46/* Reference counter handling */
     47static void ffs_data_get(struct ffs_data *ffs);
     48static void ffs_data_put(struct ffs_data *ffs);
     49/* Creates new ffs_data object. */
     50static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
     51	__attribute__((malloc));
     52
     53/* Opened counter handling. */
     54static void ffs_data_opened(struct ffs_data *ffs);
     55static void ffs_data_closed(struct ffs_data *ffs);
     56
     57/* Called with ffs->mutex held; take over ownership of data. */
     58static int __must_check
     59__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
     60static int __must_check
     61__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
     62
     63
     64/* The function structure ***************************************************/
     65
     66struct ffs_ep;
     67
     68struct ffs_function {
     69	struct usb_configuration	*conf;
     70	struct usb_gadget		*gadget;
     71	struct ffs_data			*ffs;
     72
     73	struct ffs_ep			*eps;
     74	u8				eps_revmap[16];
     75	short				*interfaces_nums;
     76
     77	struct usb_function		function;
     78};
     79
     80
     81static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
     82{
     83	return container_of(f, struct ffs_function, function);
     84}
     85
     86
     87static inline enum ffs_setup_state
     88ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
     89{
     90	return (enum ffs_setup_state)
     91		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
     92}
     93
     94
     95static void ffs_func_eps_disable(struct ffs_function *func);
     96static int __must_check ffs_func_eps_enable(struct ffs_function *func);
     97
     98static int ffs_func_bind(struct usb_configuration *,
     99			 struct usb_function *);
    100static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
    101static void ffs_func_disable(struct usb_function *);
    102static int ffs_func_setup(struct usb_function *,
    103			  const struct usb_ctrlrequest *);
    104static bool ffs_func_req_match(struct usb_function *,
    105			       const struct usb_ctrlrequest *,
    106			       bool config0);
    107static void ffs_func_suspend(struct usb_function *);
    108static void ffs_func_resume(struct usb_function *);
    109
    110
    111static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
    112static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
    113
    114
    115/* The endpoints structures *************************************************/
    116
    117struct ffs_ep {
    118	struct usb_ep			*ep;	/* P: ffs->eps_lock */
    119	struct usb_request		*req;	/* P: epfile->mutex */
    120
    121	/* [0]: full speed, [1]: high speed, [2]: super speed */
    122	struct usb_endpoint_descriptor	*descs[3];
    123
    124	u8				num;
    125};
    126
    127struct ffs_epfile {
    128	/* Protects ep->ep and ep->req. */
    129	struct mutex			mutex;
    130
    131	struct ffs_data			*ffs;
    132	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
    133
    134	struct dentry			*dentry;
    135
    136	/*
    137	 * Buffer for holding data from partial reads which may happen since
    138	 * we’re rounding user read requests to a multiple of a max packet size.
    139	 *
    140	 * The pointer is initialised with NULL value and may be set by
    141	 * __ffs_epfile_read_data function to point to a temporary buffer.
    142	 *
    143	 * In normal operation, calls to __ffs_epfile_read_buffered will consume
    144	 * data from said buffer and eventually free it.  Importantly, while the
    145	 * function is using the buffer, it sets the pointer to NULL.  This is
    146	 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
    147	 * can never run concurrently (they are synchronised by epfile->mutex)
    148	 * so the latter will not assign a new value to the pointer.
    149	 *
    150	 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
     151	 * valid) and sets the pointer to the READ_BUFFER_DROP value.  This special
     152	 * value is the crux of the synchronisation between ffs_func_eps_disable and
    153	 * __ffs_epfile_read_data.
    154	 *
    155	 * Once __ffs_epfile_read_data is about to finish it will try to set the
    156	 * pointer back to its old value (as described above), but seeing as the
    157	 * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
    158	 * the buffer.
    159	 *
    160	 * == State transitions ==
    161	 *
    162	 * • ptr == NULL:  (initial state)
    163	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
    164	 *   ◦ __ffs_epfile_read_buffered:    nop
    165	 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
    166	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
    167	 * • ptr == DROP:
    168	 *   ◦ __ffs_epfile_read_buffer_free: nop
    169	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
    170	 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
    171	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
    172	 * • ptr == buf:
    173	 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
    174	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
    175	 *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
    176	 *                                    is always called first
    177	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
    178	 * • ptr == NULL and reading:
    179	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
    180	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
    181	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
    182	 *   ◦ reading finishes and …
    183	 *     … all data read:               free buf, go to ptr == NULL
    184	 *     … otherwise:                   go to ptr == buf and reading
    185	 * • ptr == DROP and reading:
    186	 *   ◦ __ffs_epfile_read_buffer_free: nop
    187	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
    188	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
    189	 *   ◦ reading finishes:              free buf, go to ptr == DROP
    190	 */
    191	struct ffs_buffer		*read_buffer;
    192#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
    193
    194	char				name[5];
    195
    196	unsigned char			in;	/* P: ffs->eps_lock */
    197	unsigned char			isoc;	/* P: ffs->eps_lock */
    198
    199	unsigned char			_pad;
    200};
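
/*
 * The hand-off described above reduces to a small xchg()/cmpxchg() idiom;
 * a minimal sketch (mirroring __ffs_epfile_read_buffered and
 * __ffs_epfile_read_data below, with epfile->mutex held):
 *
 *	buf = xchg(&epfile->read_buffer, NULL);		// take ownership
 *	// ... consume or (re)fill buf ...
 *	if (cmpxchg(&epfile->read_buffer, NULL, buf))	// put back, unless DROP
 *		kfree(buf);				// appeared in the meantime
 */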
    201
    202struct ffs_buffer {
    203	size_t length;
    204	char *data;
    205	char storage[];
    206};
    207
    208/*  ffs_io_data structure ***************************************************/
    209
    210struct ffs_io_data {
    211	bool aio;
    212	bool read;
    213
    214	struct kiocb *kiocb;
    215	struct iov_iter data;
    216	const void *to_free;
    217	char *buf;
    218
    219	struct mm_struct *mm;
    220	struct work_struct work;
    221
    222	struct usb_ep *ep;
    223	struct usb_request *req;
    224	struct sg_table sgt;
    225	bool use_sg;
    226
    227	struct ffs_data *ffs;
    228
    229	int status;
    230	struct completion done;
    231};
    232
    233struct ffs_desc_helper {
    234	struct ffs_data *ffs;
    235	unsigned interfaces_count;
    236	unsigned eps_count;
    237};
    238
    239static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
    240static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
    241
    242static struct dentry *
    243ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
    244		   const struct file_operations *fops);
    245
    246/* Devices management *******************************************************/
    247
    248DEFINE_MUTEX(ffs_lock);
    249EXPORT_SYMBOL_GPL(ffs_lock);
    250
    251static struct ffs_dev *_ffs_find_dev(const char *name);
    252static struct ffs_dev *_ffs_alloc_dev(void);
    253static void _ffs_free_dev(struct ffs_dev *dev);
    254static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
    255static void ffs_release_dev(struct ffs_dev *ffs_dev);
    256static int ffs_ready(struct ffs_data *ffs);
    257static void ffs_closed(struct ffs_data *ffs);
    258
    259/* Misc helper functions ****************************************************/
    260
    261static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
    262	__attribute__((warn_unused_result, nonnull));
    263static char *ffs_prepare_buffer(const char __user *buf, size_t len)
    264	__attribute__((warn_unused_result, nonnull));
    265
    266
    267/* Control file aka ep0 *****************************************************/
    268
    269static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
    270{
    271	struct ffs_data *ffs = req->context;
    272
    273	complete(&ffs->ep0req_completion);
    274}
    275
    276static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
    277	__releases(&ffs->ev.waitq.lock)
    278{
    279	struct usb_request *req = ffs->ep0req;
    280	int ret;
    281
    282	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
    283
    284	spin_unlock_irq(&ffs->ev.waitq.lock);
    285
    286	req->buf      = data;
    287	req->length   = len;
    288
    289	/*
     290	 * The UDC layer requires a buffer to be provided even for a ZLP, but
     291	 * it should not use it at all. Let's provide a poisoned pointer to
     292	 * catch a possible bug in the driver.
    293	 */
    294	if (req->buf == NULL)
    295		req->buf = (void *)0xDEADBABE;
    296
    297	reinit_completion(&ffs->ep0req_completion);
    298
    299	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
    300	if (ret < 0)
    301		return ret;
    302
    303	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
    304	if (ret) {
    305		usb_ep_dequeue(ffs->gadget->ep0, req);
    306		return -EINTR;
    307	}
    308
    309	ffs->setup_state = FFS_NO_SETUP;
    310	return req->status ? req->status : req->actual;
    311}
    312
    313static int __ffs_ep0_stall(struct ffs_data *ffs)
    314{
    315	if (ffs->ev.can_stall) {
    316		pr_vdebug("ep0 stall\n");
    317		usb_ep_set_halt(ffs->gadget->ep0);
    318		ffs->setup_state = FFS_NO_SETUP;
    319		return -EL2HLT;
    320	} else {
    321		pr_debug("bogus ep0 stall!\n");
    322		return -ESRCH;
    323	}
    324}
    325
    326static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
    327			     size_t len, loff_t *ptr)
    328{
    329	struct ffs_data *ffs = file->private_data;
    330	ssize_t ret;
    331	char *data;
    332
    333	ENTER();
    334
    335	/* Fast check if setup was canceled */
    336	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
    337		return -EIDRM;
    338
    339	/* Acquire mutex */
    340	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
    341	if (ret < 0)
    342		return ret;
    343
    344	/* Check state */
    345	switch (ffs->state) {
    346	case FFS_READ_DESCRIPTORS:
    347	case FFS_READ_STRINGS:
    348		/* Copy data */
    349		if (len < 16) {
    350			ret = -EINVAL;
    351			break;
    352		}
    353
    354		data = ffs_prepare_buffer(buf, len);
    355		if (IS_ERR(data)) {
    356			ret = PTR_ERR(data);
    357			break;
    358		}
    359
    360		/* Handle data */
    361		if (ffs->state == FFS_READ_DESCRIPTORS) {
    362			pr_info("read descriptors\n");
    363			ret = __ffs_data_got_descs(ffs, data, len);
    364			if (ret < 0)
    365				break;
    366
    367			ffs->state = FFS_READ_STRINGS;
    368			ret = len;
    369		} else {
    370			pr_info("read strings\n");
    371			ret = __ffs_data_got_strings(ffs, data, len);
    372			if (ret < 0)
    373				break;
    374
    375			ret = ffs_epfiles_create(ffs);
    376			if (ret) {
    377				ffs->state = FFS_CLOSING;
    378				break;
    379			}
    380
    381			ffs->state = FFS_ACTIVE;
    382			mutex_unlock(&ffs->mutex);
    383
    384			ret = ffs_ready(ffs);
    385			if (ret < 0) {
    386				ffs->state = FFS_CLOSING;
    387				return ret;
    388			}
    389
    390			return len;
    391		}
    392		break;
    393
    394	case FFS_ACTIVE:
    395		data = NULL;
    396		/*
     397		 * We're called from user space, so we can use _irq
     398		 * rather than _irqsave
    399		 */
    400		spin_lock_irq(&ffs->ev.waitq.lock);
    401		switch (ffs_setup_state_clear_cancelled(ffs)) {
    402		case FFS_SETUP_CANCELLED:
    403			ret = -EIDRM;
    404			goto done_spin;
    405
    406		case FFS_NO_SETUP:
    407			ret = -ESRCH;
    408			goto done_spin;
    409
    410		case FFS_SETUP_PENDING:
    411			break;
    412		}
    413
    414		/* FFS_SETUP_PENDING */
    415		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
    416			spin_unlock_irq(&ffs->ev.waitq.lock);
    417			ret = __ffs_ep0_stall(ffs);
    418			break;
    419		}
    420
    421		/* FFS_SETUP_PENDING and not stall */
    422		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
    423
    424		spin_unlock_irq(&ffs->ev.waitq.lock);
    425
    426		data = ffs_prepare_buffer(buf, len);
    427		if (IS_ERR(data)) {
    428			ret = PTR_ERR(data);
    429			break;
    430		}
    431
    432		spin_lock_irq(&ffs->ev.waitq.lock);
    433
    434		/*
    435		 * We are guaranteed to be still in FFS_ACTIVE state
    436		 * but the state of setup could have changed from
    437		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
    438		 * to check for that.  If that happened we copied data
    439		 * from user space in vain but it's unlikely.
    440		 *
    441		 * For sure we are not in FFS_NO_SETUP since this is
    442		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
    443		 * transition can be performed and it's protected by
    444		 * mutex.
    445		 */
    446		if (ffs_setup_state_clear_cancelled(ffs) ==
    447		    FFS_SETUP_CANCELLED) {
    448			ret = -EIDRM;
    449done_spin:
    450			spin_unlock_irq(&ffs->ev.waitq.lock);
    451		} else {
    452			/* unlocks spinlock */
    453			ret = __ffs_ep0_queue_wait(ffs, data, len);
    454		}
    455		kfree(data);
    456		break;
    457
    458	default:
    459		ret = -EBADFD;
    460		break;
    461	}
    462
    463	mutex_unlock(&ffs->mutex);
    464	return ret;
    465}
    466
    467/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
    468static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
    469				     size_t n)
    470	__releases(&ffs->ev.waitq.lock)
    471{
    472	/*
    473	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
     474	 * the size of the ffs->ev.types array (which is four), so that's how
     475	 * much space we reserve.
    476	 */
    477	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
    478	const size_t size = n * sizeof *events;
    479	unsigned i = 0;
    480
    481	memset(events, 0, size);
    482
    483	do {
    484		events[i].type = ffs->ev.types[i];
    485		if (events[i].type == FUNCTIONFS_SETUP) {
    486			events[i].u.setup = ffs->ev.setup;
    487			ffs->setup_state = FFS_SETUP_PENDING;
    488		}
    489	} while (++i < n);
    490
    491	ffs->ev.count -= n;
    492	if (ffs->ev.count)
    493		memmove(ffs->ev.types, ffs->ev.types + n,
    494			ffs->ev.count * sizeof *ffs->ev.types);
    495
    496	spin_unlock_irq(&ffs->ev.waitq.lock);
    497	mutex_unlock(&ffs->mutex);
    498
    499	return copy_to_user(buf, events, size) ? -EFAULT : size;
    500}
    501
    502static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
    503			    size_t len, loff_t *ptr)
    504{
    505	struct ffs_data *ffs = file->private_data;
    506	char *data = NULL;
    507	size_t n;
    508	int ret;
    509
    510	ENTER();
    511
    512	/* Fast check if setup was canceled */
    513	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
    514		return -EIDRM;
    515
    516	/* Acquire mutex */
    517	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
    518	if (ret < 0)
    519		return ret;
    520
    521	/* Check state */
    522	if (ffs->state != FFS_ACTIVE) {
    523		ret = -EBADFD;
    524		goto done_mutex;
    525	}
    526
    527	/*
    528	 * We're called from user space, we can use _irq rather then
    529	 * _irqsave
    530	 */
    531	spin_lock_irq(&ffs->ev.waitq.lock);
    532
    533	switch (ffs_setup_state_clear_cancelled(ffs)) {
    534	case FFS_SETUP_CANCELLED:
    535		ret = -EIDRM;
    536		break;
    537
    538	case FFS_NO_SETUP:
    539		n = len / sizeof(struct usb_functionfs_event);
    540		if (!n) {
    541			ret = -EINVAL;
    542			break;
    543		}
    544
    545		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
    546			ret = -EAGAIN;
    547			break;
    548		}
    549
    550		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
    551							ffs->ev.count)) {
    552			ret = -EINTR;
    553			break;
    554		}
    555
    556		/* unlocks spinlock */
    557		return __ffs_ep0_read_events(ffs, buf,
    558					     min(n, (size_t)ffs->ev.count));
    559
    560	case FFS_SETUP_PENDING:
    561		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
    562			spin_unlock_irq(&ffs->ev.waitq.lock);
    563			ret = __ffs_ep0_stall(ffs);
    564			goto done_mutex;
    565		}
    566
    567		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
    568
    569		spin_unlock_irq(&ffs->ev.waitq.lock);
    570
    571		if (len) {
    572			data = kmalloc(len, GFP_KERNEL);
    573			if (!data) {
    574				ret = -ENOMEM;
    575				goto done_mutex;
    576			}
    577		}
    578
    579		spin_lock_irq(&ffs->ev.waitq.lock);
    580
    581		/* See ffs_ep0_write() */
    582		if (ffs_setup_state_clear_cancelled(ffs) ==
    583		    FFS_SETUP_CANCELLED) {
    584			ret = -EIDRM;
    585			break;
    586		}
    587
    588		/* unlocks spinlock */
    589		ret = __ffs_ep0_queue_wait(ffs, data, len);
    590		if ((ret > 0) && (copy_to_user(buf, data, len)))
    591			ret = -EFAULT;
    592		goto done_mutex;
    593
    594	default:
    595		ret = -EBADFD;
    596		break;
    597	}
    598
    599	spin_unlock_irq(&ffs->ev.waitq.lock);
    600done_mutex:
    601	mutex_unlock(&ffs->mutex);
    602	kfree(data);
    603	return ret;
    604}
    605
    606static int ffs_ep0_open(struct inode *inode, struct file *file)
    607{
    608	struct ffs_data *ffs = inode->i_private;
    609
    610	ENTER();
    611
    612	if (ffs->state == FFS_CLOSING)
    613		return -EBUSY;
    614
    615	file->private_data = ffs;
    616	ffs_data_opened(ffs);
    617
    618	return stream_open(inode, file);
    619}
    620
    621static int ffs_ep0_release(struct inode *inode, struct file *file)
    622{
    623	struct ffs_data *ffs = file->private_data;
    624
    625	ENTER();
    626
    627	ffs_data_closed(ffs);
    628
    629	return 0;
    630}
    631
    632static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
    633{
    634	struct ffs_data *ffs = file->private_data;
    635	struct usb_gadget *gadget = ffs->gadget;
    636	long ret;
    637
    638	ENTER();
    639
    640	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
    641		struct ffs_function *func = ffs->func;
    642		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
    643	} else if (gadget && gadget->ops->ioctl) {
    644		ret = gadget->ops->ioctl(gadget, code, value);
    645	} else {
    646		ret = -ENOTTY;
    647	}
    648
    649	return ret;
    650}
    651
    652static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
    653{
    654	struct ffs_data *ffs = file->private_data;
    655	__poll_t mask = EPOLLWRNORM;
    656	int ret;
    657
    658	poll_wait(file, &ffs->ev.waitq, wait);
    659
    660	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
    661	if (ret < 0)
    662		return mask;
    663
    664	switch (ffs->state) {
    665	case FFS_READ_DESCRIPTORS:
    666	case FFS_READ_STRINGS:
    667		mask |= EPOLLOUT;
    668		break;
    669
    670	case FFS_ACTIVE:
    671		switch (ffs->setup_state) {
    672		case FFS_NO_SETUP:
    673			if (ffs->ev.count)
    674				mask |= EPOLLIN;
    675			break;
    676
    677		case FFS_SETUP_PENDING:
    678		case FFS_SETUP_CANCELLED:
    679			mask |= (EPOLLIN | EPOLLOUT);
    680			break;
    681		}
    682		break;
    683
    684	case FFS_CLOSING:
    685		break;
    686	case FFS_DEACTIVATED:
    687		break;
    688	}
    689
    690	mutex_unlock(&ffs->mutex);
    691
    692	return mask;
    693}
    694
    695static const struct file_operations ffs_ep0_operations = {
    696	.llseek =	no_llseek,
    697
    698	.open =		ffs_ep0_open,
    699	.write =	ffs_ep0_write,
    700	.read =		ffs_ep0_read,
    701	.release =	ffs_ep0_release,
    702	.unlocked_ioctl =	ffs_ep0_ioctl,
    703	.poll =		ffs_ep0_poll,
    704};
    705
    706
    707/* "Normal" endpoints operations ********************************************/
    708
    709static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
    710{
    711	struct ffs_io_data *io_data = req->context;
    712
    713	ENTER();
    714	if (req->status)
    715		io_data->status = req->status;
    716	else
    717		io_data->status = req->actual;
    718
    719	complete(&io_data->done);
    720}
    721
    722static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
    723{
    724	ssize_t ret = copy_to_iter(data, data_len, iter);
    725	if (ret == data_len)
    726		return ret;
    727
    728	if (iov_iter_count(iter))
    729		return -EFAULT;
    730
    731	/*
    732	 * Dear user space developer!
    733	 *
     734	 * TL;DR: To stop getting the error message below in your kernel log,
     735	 * change user space code using functionfs to align read buffers to a
     736	 * multiple of the max packet size.
     737	 *
     738	 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of the max
     739	 * packet size.  When an unaligned buffer is passed to functionfs, it
     740	 * internally uses a larger, aligned buffer so that such UDCs are happy.
     741	 *
     742	 * Unfortunately, this means that the host may send more data than was
     743	 * requested in the read(2) system call.  f_fs doesn’t know what to do with
     744	 * that excess data so it simply drops it.
     745	 *
     746	 * Had the buffer been aligned in the first place, no such problem would
     747	 * have happened.
     748	 *
     749	 * Data may be dropped only in AIO reads.  Synchronous reads are handled
     750	 * by splitting a request into multiple parts.  This splitting may still
     751	 * be a problem, though, so it’s likely best to align the buffer
     752	 * regardless of whether the read is AIO or not.
    753	 *
    754	 * This only affects OUT endpoints, i.e. reading data with a read(2),
    755	 * aio_read(2) etc. system calls.  Writing data to an IN endpoint is not
    756	 * affected.
    757	 */
    758	pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
    759	       "Align read buffer size to max packet size to avoid the problem.\n",
    760	       data_len, ret);
    761
    762	return ret;
    763}
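
/*
 * A minimal user-space sketch of the alignment advice above, assuming an
 * already-opened endpoint file descriptor ep_fd and a wMaxPacketSize of 512
 * (a high-speed bulk endpoint); any power-of-two packet size works the same:
 *
 *	size_t maxpkt = 512;					// assumed wMaxPacketSize
 *	size_t len = (want + maxpkt - 1) & ~(maxpkt - 1);	// round up
 *	char *buf = malloc(len);
 *	ssize_t n = read(ep_fd, buf, len);			// read from epN file
 */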
    764
    765/*
    766 * allocate a virtually contiguous buffer and create a scatterlist describing it
     767 * @sgt	- pointer to a place to be filled with sg_table contents
     768 * @sz	- required buffer size
    769 */
    770static void *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
    771{
    772	struct page **pages;
    773	void *vaddr, *ptr;
    774	unsigned int n_pages;
    775	int i;
    776
    777	vaddr = vmalloc(sz);
    778	if (!vaddr)
    779		return NULL;
    780
    781	n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
    782	pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
    783	if (!pages) {
    784		vfree(vaddr);
    785
    786		return NULL;
    787	}
    788	for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
    789		pages[i] = vmalloc_to_page(ptr);
    790
    791	if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) {
    792		kvfree(pages);
    793		vfree(vaddr);
    794
    795		return NULL;
    796	}
    797	kvfree(pages);
    798
    799	return vaddr;
    800}
    801
    802static inline void *ffs_alloc_buffer(struct ffs_io_data *io_data,
    803	size_t data_len)
    804{
    805	if (io_data->use_sg)
    806		return ffs_build_sg_list(&io_data->sgt, data_len);
    807
    808	return kmalloc(data_len, GFP_KERNEL);
    809}
    810
    811static inline void ffs_free_buffer(struct ffs_io_data *io_data)
    812{
    813	if (!io_data->buf)
    814		return;
    815
    816	if (io_data->use_sg) {
    817		sg_free_table(&io_data->sgt);
    818		vfree(io_data->buf);
    819	} else {
    820		kfree(io_data->buf);
    821	}
    822}
    823
    824static void ffs_user_copy_worker(struct work_struct *work)
    825{
    826	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
    827						   work);
    828	int ret = io_data->req->status ? io_data->req->status :
    829					 io_data->req->actual;
    830	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
    831
    832	if (io_data->read && ret > 0) {
    833		kthread_use_mm(io_data->mm);
    834		ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
    835		kthread_unuse_mm(io_data->mm);
    836	}
    837
    838	io_data->kiocb->ki_complete(io_data->kiocb, ret);
    839
    840	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
    841		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
    842
    843	usb_ep_free_request(io_data->ep, io_data->req);
    844
    845	if (io_data->read)
    846		kfree(io_data->to_free);
    847	ffs_free_buffer(io_data);
    848	kfree(io_data);
    849}
    850
    851static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
    852					 struct usb_request *req)
    853{
    854	struct ffs_io_data *io_data = req->context;
    855	struct ffs_data *ffs = io_data->ffs;
    856
    857	ENTER();
    858
    859	INIT_WORK(&io_data->work, ffs_user_copy_worker);
    860	queue_work(ffs->io_completion_wq, &io_data->work);
    861}
    862
    863static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
    864{
    865	/*
    866	 * See comment in struct ffs_epfile for full read_buffer pointer
    867	 * synchronisation story.
    868	 */
    869	struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
    870	if (buf && buf != READ_BUFFER_DROP)
    871		kfree(buf);
    872}
    873
    874/* Assumes epfile->mutex is held. */
    875static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
    876					  struct iov_iter *iter)
    877{
    878	/*
    879	 * Null out epfile->read_buffer so ffs_func_eps_disable does not free
    880	 * the buffer while we are using it.  See comment in struct ffs_epfile
    881	 * for full read_buffer pointer synchronisation story.
    882	 */
    883	struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
    884	ssize_t ret;
    885	if (!buf || buf == READ_BUFFER_DROP)
    886		return 0;
    887
    888	ret = copy_to_iter(buf->data, buf->length, iter);
    889	if (buf->length == ret) {
    890		kfree(buf);
    891		return ret;
    892	}
    893
    894	if (iov_iter_count(iter)) {
    895		ret = -EFAULT;
    896	} else {
    897		buf->length -= ret;
    898		buf->data += ret;
    899	}
    900
    901	if (cmpxchg(&epfile->read_buffer, NULL, buf))
    902		kfree(buf);
    903
    904	return ret;
    905}
    906
    907/* Assumes epfile->mutex is held. */
    908static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
    909				      void *data, int data_len,
    910				      struct iov_iter *iter)
    911{
    912	struct ffs_buffer *buf;
    913
    914	ssize_t ret = copy_to_iter(data, data_len, iter);
    915	if (data_len == ret)
    916		return ret;
    917
    918	if (iov_iter_count(iter))
    919		return -EFAULT;
    920
    921	/* See ffs_copy_to_iter for more context. */
    922	pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
    923		data_len, ret);
    924
    925	data_len -= ret;
    926	buf = kmalloc(struct_size(buf, storage, data_len), GFP_KERNEL);
    927	if (!buf)
    928		return -ENOMEM;
    929	buf->length = data_len;
    930	buf->data = buf->storage;
    931	memcpy(buf->storage, data + ret, flex_array_size(buf, storage, data_len));
    932
    933	/*
    934	 * At this point read_buffer is NULL or READ_BUFFER_DROP (if
    935	 * ffs_func_eps_disable has been called in the meanwhile).  See comment
    936	 * in struct ffs_epfile for full read_buffer pointer synchronisation
    937	 * story.
    938	 */
    939	if (cmpxchg(&epfile->read_buffer, NULL, buf))
    940		kfree(buf);
    941
    942	return ret;
    943}
    944
    945static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
    946{
    947	struct ffs_epfile *epfile = file->private_data;
    948	struct usb_request *req;
    949	struct ffs_ep *ep;
    950	char *data = NULL;
    951	ssize_t ret, data_len = -EINVAL;
    952	int halt;
    953
    954	/* Are we still active? */
    955	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
    956		return -ENODEV;
    957
    958	/* Wait for endpoint to be enabled */
    959	ep = epfile->ep;
    960	if (!ep) {
    961		if (file->f_flags & O_NONBLOCK)
    962			return -EAGAIN;
    963
    964		ret = wait_event_interruptible(
    965				epfile->ffs->wait, (ep = epfile->ep));
    966		if (ret)
    967			return -EINTR;
    968	}
    969
    970	/* Do we halt? */
    971	halt = (!io_data->read == !epfile->in);
    972	if (halt && epfile->isoc)
    973		return -EINVAL;
    974
    975	/* We will be using request and read_buffer */
    976	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
    977	if (ret)
    978		goto error;
    979
    980	/* Allocate & copy */
    981	if (!halt) {
    982		struct usb_gadget *gadget;
    983
    984		/*
     985		 * Do we have buffered data from a previous partial read?  Check
     986		 * that for the synchronous case only, because we do not have a
     987		 * facility to ‘wake up’ a pending asynchronous read and push
     988		 * buffered data to it, which we would need to make things behave
     989		 * consistently.
    990		 */
    991		if (!io_data->aio && io_data->read) {
    992			ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
    993			if (ret)
    994				goto error_mutex;
    995		}
    996
    997		/*
    998		 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
    999		 * before the waiting completes, so do not assign to 'gadget'
   1000		 * earlier
   1001		 */
   1002		gadget = epfile->ffs->gadget;
   1003
   1004		spin_lock_irq(&epfile->ffs->eps_lock);
   1005		/* In the meantime, endpoint got disabled or changed. */
   1006		if (epfile->ep != ep) {
   1007			ret = -ESHUTDOWN;
   1008			goto error_lock;
   1009		}
   1010		data_len = iov_iter_count(&io_data->data);
   1011		/*
    1012		 * The controller may require the buffer size to be aligned to the
    1013		 * maxpacketsize of an OUT endpoint.
   1014		 */
   1015		if (io_data->read)
   1016			data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
   1017
   1018		io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
   1019		spin_unlock_irq(&epfile->ffs->eps_lock);
   1020
   1021		data = ffs_alloc_buffer(io_data, data_len);
   1022		if (!data) {
   1023			ret = -ENOMEM;
   1024			goto error_mutex;
   1025		}
   1026		if (!io_data->read &&
   1027		    !copy_from_iter_full(data, data_len, &io_data->data)) {
   1028			ret = -EFAULT;
   1029			goto error_mutex;
   1030		}
   1031	}
   1032
   1033	spin_lock_irq(&epfile->ffs->eps_lock);
   1034
   1035	if (epfile->ep != ep) {
   1036		/* In the meantime, endpoint got disabled or changed. */
   1037		ret = -ESHUTDOWN;
   1038	} else if (halt) {
   1039		ret = usb_ep_set_halt(ep->ep);
   1040		if (!ret)
   1041			ret = -EBADMSG;
   1042	} else if (data_len == -EINVAL) {
   1043		/*
   1044		 * Sanity Check: even though data_len can't be used
   1045		 * uninitialized at the time I write this comment, some
   1046		 * compilers complain about this situation.
   1047		 * In order to keep the code clean from warnings, data_len is
   1048		 * being initialized to -EINVAL during its declaration, which
    1049		 * means we can no longer rely on the compiler to warn if a future
    1050		 * change ends up using data_len uninitialized.
    1051		 * For that reason, we're adding this redundant sanity check
   1052		 * here.
   1053		 */
   1054		WARN(1, "%s: data_len == -EINVAL\n", __func__);
   1055		ret = -EINVAL;
   1056	} else if (!io_data->aio) {
   1057		bool interrupted = false;
   1058
   1059		req = ep->req;
   1060		if (io_data->use_sg) {
   1061			req->buf = NULL;
   1062			req->sg	= io_data->sgt.sgl;
   1063			req->num_sgs = io_data->sgt.nents;
   1064		} else {
   1065			req->buf = data;
   1066			req->num_sgs = 0;
   1067		}
   1068		req->length = data_len;
   1069
   1070		io_data->buf = data;
   1071
   1072		init_completion(&io_data->done);
   1073		req->context  = io_data;
   1074		req->complete = ffs_epfile_io_complete;
   1075
   1076		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
   1077		if (ret < 0)
   1078			goto error_lock;
   1079
   1080		spin_unlock_irq(&epfile->ffs->eps_lock);
   1081
   1082		if (wait_for_completion_interruptible(&io_data->done)) {
   1083			spin_lock_irq(&epfile->ffs->eps_lock);
   1084			if (epfile->ep != ep) {
   1085				ret = -ESHUTDOWN;
   1086				goto error_lock;
   1087			}
   1088			/*
    1089			 * To avoid a race condition with ffs_epfile_io_complete,
    1090			 * dequeue the request first and then check the
    1091			 * status.  The usb_ep_dequeue API should guarantee no race
    1092			 * condition with the req->complete callback.
   1093			 */
   1094			usb_ep_dequeue(ep->ep, req);
   1095			spin_unlock_irq(&epfile->ffs->eps_lock);
   1096			wait_for_completion(&io_data->done);
   1097			interrupted = io_data->status < 0;
   1098		}
   1099
   1100		if (interrupted)
   1101			ret = -EINTR;
   1102		else if (io_data->read && io_data->status > 0)
   1103			ret = __ffs_epfile_read_data(epfile, data, io_data->status,
   1104						     &io_data->data);
   1105		else
   1106			ret = io_data->status;
   1107		goto error_mutex;
   1108	} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
   1109		ret = -ENOMEM;
   1110	} else {
   1111		if (io_data->use_sg) {
   1112			req->buf = NULL;
   1113			req->sg	= io_data->sgt.sgl;
   1114			req->num_sgs = io_data->sgt.nents;
   1115		} else {
   1116			req->buf = data;
   1117			req->num_sgs = 0;
   1118		}
   1119		req->length = data_len;
   1120
   1121		io_data->buf = data;
   1122		io_data->ep = ep->ep;
   1123		io_data->req = req;
   1124		io_data->ffs = epfile->ffs;
   1125
   1126		req->context  = io_data;
   1127		req->complete = ffs_epfile_async_io_complete;
   1128
   1129		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
   1130		if (ret) {
   1131			io_data->req = NULL;
   1132			usb_ep_free_request(ep->ep, req);
   1133			goto error_lock;
   1134		}
   1135
   1136		ret = -EIOCBQUEUED;
   1137		/*
   1138		 * Do not kfree the buffer in this function.  It will be freed
   1139		 * by ffs_user_copy_worker.
   1140		 */
   1141		data = NULL;
   1142	}
   1143
   1144error_lock:
   1145	spin_unlock_irq(&epfile->ffs->eps_lock);
   1146error_mutex:
   1147	mutex_unlock(&epfile->mutex);
   1148error:
   1149	if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
   1150		ffs_free_buffer(io_data);
   1151	return ret;
   1152}
   1153
   1154static int
   1155ffs_epfile_open(struct inode *inode, struct file *file)
   1156{
   1157	struct ffs_epfile *epfile = inode->i_private;
   1158
   1159	ENTER();
   1160
   1161	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
   1162		return -ENODEV;
   1163
   1164	file->private_data = epfile;
   1165	ffs_data_opened(epfile->ffs);
   1166
   1167	return stream_open(inode, file);
   1168}
   1169
   1170static int ffs_aio_cancel(struct kiocb *kiocb)
   1171{
   1172	struct ffs_io_data *io_data = kiocb->private;
   1173	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
   1174	unsigned long flags;
   1175	int value;
   1176
   1177	ENTER();
   1178
   1179	spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
   1180
   1181	if (io_data && io_data->ep && io_data->req)
   1182		value = usb_ep_dequeue(io_data->ep, io_data->req);
   1183	else
   1184		value = -EINVAL;
   1185
   1186	spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
   1187
   1188	return value;
   1189}
   1190
   1191static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
   1192{
   1193	struct ffs_io_data io_data, *p = &io_data;
   1194	ssize_t res;
   1195
   1196	ENTER();
   1197
   1198	if (!is_sync_kiocb(kiocb)) {
   1199		p = kzalloc(sizeof(io_data), GFP_KERNEL);
   1200		if (!p)
   1201			return -ENOMEM;
   1202		p->aio = true;
   1203	} else {
   1204		memset(p, 0, sizeof(*p));
   1205		p->aio = false;
   1206	}
   1207
   1208	p->read = false;
   1209	p->kiocb = kiocb;
   1210	p->data = *from;
   1211	p->mm = current->mm;
   1212
   1213	kiocb->private = p;
   1214
   1215	if (p->aio)
   1216		kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
   1217
   1218	res = ffs_epfile_io(kiocb->ki_filp, p);
   1219	if (res == -EIOCBQUEUED)
   1220		return res;
   1221	if (p->aio)
   1222		kfree(p);
   1223	else
   1224		*from = p->data;
   1225	return res;
   1226}
   1227
   1228static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
   1229{
   1230	struct ffs_io_data io_data, *p = &io_data;
   1231	ssize_t res;
   1232
   1233	ENTER();
   1234
   1235	if (!is_sync_kiocb(kiocb)) {
   1236		p = kzalloc(sizeof(io_data), GFP_KERNEL);
   1237		if (!p)
   1238			return -ENOMEM;
   1239		p->aio = true;
   1240	} else {
   1241		memset(p, 0, sizeof(*p));
   1242		p->aio = false;
   1243	}
   1244
   1245	p->read = true;
   1246	p->kiocb = kiocb;
   1247	if (p->aio) {
   1248		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
   1249		if (!p->to_free) {
   1250			kfree(p);
   1251			return -ENOMEM;
   1252		}
   1253	} else {
   1254		p->data = *to;
   1255		p->to_free = NULL;
   1256	}
   1257	p->mm = current->mm;
   1258
   1259	kiocb->private = p;
   1260
   1261	if (p->aio)
   1262		kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
   1263
   1264	res = ffs_epfile_io(kiocb->ki_filp, p);
   1265	if (res == -EIOCBQUEUED)
   1266		return res;
   1267
   1268	if (p->aio) {
   1269		kfree(p->to_free);
   1270		kfree(p);
   1271	} else {
   1272		*to = p->data;
   1273	}
   1274	return res;
   1275}
   1276
   1277static int
   1278ffs_epfile_release(struct inode *inode, struct file *file)
   1279{
   1280	struct ffs_epfile *epfile = inode->i_private;
   1281
   1282	ENTER();
   1283
   1284	__ffs_epfile_read_buffer_free(epfile);
   1285	ffs_data_closed(epfile->ffs);
   1286
   1287	return 0;
   1288}
   1289
   1290static long ffs_epfile_ioctl(struct file *file, unsigned code,
   1291			     unsigned long value)
   1292{
   1293	struct ffs_epfile *epfile = file->private_data;
   1294	struct ffs_ep *ep;
   1295	int ret;
   1296
   1297	ENTER();
   1298
   1299	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
   1300		return -ENODEV;
   1301
   1302	/* Wait for endpoint to be enabled */
   1303	ep = epfile->ep;
   1304	if (!ep) {
   1305		if (file->f_flags & O_NONBLOCK)
   1306			return -EAGAIN;
   1307
   1308		ret = wait_event_interruptible(
   1309				epfile->ffs->wait, (ep = epfile->ep));
   1310		if (ret)
   1311			return -EINTR;
   1312	}
   1313
   1314	spin_lock_irq(&epfile->ffs->eps_lock);
   1315
   1316	/* In the meantime, endpoint got disabled or changed. */
   1317	if (epfile->ep != ep) {
   1318		spin_unlock_irq(&epfile->ffs->eps_lock);
   1319		return -ESHUTDOWN;
   1320	}
   1321
   1322	switch (code) {
   1323	case FUNCTIONFS_FIFO_STATUS:
   1324		ret = usb_ep_fifo_status(epfile->ep->ep);
   1325		break;
   1326	case FUNCTIONFS_FIFO_FLUSH:
   1327		usb_ep_fifo_flush(epfile->ep->ep);
   1328		ret = 0;
   1329		break;
   1330	case FUNCTIONFS_CLEAR_HALT:
   1331		ret = usb_ep_clear_halt(epfile->ep->ep);
   1332		break;
   1333	case FUNCTIONFS_ENDPOINT_REVMAP:
   1334		ret = epfile->ep->num;
   1335		break;
   1336	case FUNCTIONFS_ENDPOINT_DESC:
   1337	{
   1338		int desc_idx;
   1339		struct usb_endpoint_descriptor desc1, *desc;
   1340
   1341		switch (epfile->ffs->gadget->speed) {
   1342		case USB_SPEED_SUPER:
   1343		case USB_SPEED_SUPER_PLUS:
   1344			desc_idx = 2;
   1345			break;
   1346		case USB_SPEED_HIGH:
   1347			desc_idx = 1;
   1348			break;
   1349		default:
   1350			desc_idx = 0;
   1351		}
   1352
   1353		desc = epfile->ep->descs[desc_idx];
   1354		memcpy(&desc1, desc, desc->bLength);
   1355
   1356		spin_unlock_irq(&epfile->ffs->eps_lock);
   1357		ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
   1358		if (ret)
   1359			ret = -EFAULT;
   1360		return ret;
   1361	}
   1362	default:
   1363		ret = -ENOTTY;
   1364	}
   1365	spin_unlock_irq(&epfile->ffs->eps_lock);
   1366
   1367	return ret;
   1368}
   1369
   1370static const struct file_operations ffs_epfile_operations = {
   1371	.llseek =	no_llseek,
   1372
   1373	.open =		ffs_epfile_open,
   1374	.write_iter =	ffs_epfile_write_iter,
   1375	.read_iter =	ffs_epfile_read_iter,
   1376	.release =	ffs_epfile_release,
   1377	.unlocked_ioctl =	ffs_epfile_ioctl,
   1378	.compat_ioctl = compat_ptr_ioctl,
   1379};
   1380
   1381
   1382/* File system and super block operations ***********************************/
   1383
   1384/*
   1385 * Mounting the file system creates a controller file, used first for
   1386 * function configuration then later for event monitoring.
   1387 */
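
/*
 * A minimal sketch of that flow from user space, assuming an instance named
 * "usb0" mounted at /dev/usb-ffs/usb0 (the names are illustrative):
 *
 *	mount("usb0", "/dev/usb-ffs/usb0", "functionfs", 0, NULL);
 *
 *	ep0 = open("/dev/usb-ffs/usb0/ep0", O_RDWR);
 *	write(ep0, descriptors, descs_len);	// FFS_READ_DESCRIPTORS
 *	write(ep0, strings, strs_len);		// FFS_READ_STRINGS -> FFS_ACTIVE;
 *						// ep1, ep2, ... files appear
 *	read(ep0, events, sizeof(events));	// then read/poll ep0 for events
 */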
   1388
   1389static struct inode *__must_check
   1390ffs_sb_make_inode(struct super_block *sb, void *data,
   1391		  const struct file_operations *fops,
   1392		  const struct inode_operations *iops,
   1393		  struct ffs_file_perms *perms)
   1394{
   1395	struct inode *inode;
   1396
   1397	ENTER();
   1398
   1399	inode = new_inode(sb);
   1400
   1401	if (inode) {
   1402		struct timespec64 ts = current_time(inode);
   1403
   1404		inode->i_ino	 = get_next_ino();
   1405		inode->i_mode    = perms->mode;
   1406		inode->i_uid     = perms->uid;
   1407		inode->i_gid     = perms->gid;
   1408		inode->i_atime   = ts;
   1409		inode->i_mtime   = ts;
   1410		inode->i_ctime   = ts;
   1411		inode->i_private = data;
   1412		if (fops)
   1413			inode->i_fop = fops;
   1414		if (iops)
   1415			inode->i_op  = iops;
   1416	}
   1417
   1418	return inode;
   1419}
   1420
   1421/* Create "regular" file */
   1422static struct dentry *ffs_sb_create_file(struct super_block *sb,
   1423					const char *name, void *data,
   1424					const struct file_operations *fops)
   1425{
   1426	struct ffs_data	*ffs = sb->s_fs_info;
   1427	struct dentry	*dentry;
   1428	struct inode	*inode;
   1429
   1430	ENTER();
   1431
   1432	dentry = d_alloc_name(sb->s_root, name);
   1433	if (!dentry)
   1434		return NULL;
   1435
   1436	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
   1437	if (!inode) {
   1438		dput(dentry);
   1439		return NULL;
   1440	}
   1441
   1442	d_add(dentry, inode);
   1443	return dentry;
   1444}
   1445
   1446/* Super block */
   1447static const struct super_operations ffs_sb_operations = {
   1448	.statfs =	simple_statfs,
   1449	.drop_inode =	generic_delete_inode,
   1450};
   1451
   1452struct ffs_sb_fill_data {
   1453	struct ffs_file_perms perms;
   1454	umode_t root_mode;
   1455	const char *dev_name;
   1456	bool no_disconnect;
   1457	struct ffs_data *ffs_data;
   1458};
   1459
   1460static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
   1461{
   1462	struct ffs_sb_fill_data *data = fc->fs_private;
   1463	struct inode	*inode;
   1464	struct ffs_data	*ffs = data->ffs_data;
   1465
   1466	ENTER();
   1467
   1468	ffs->sb              = sb;
   1469	data->ffs_data       = NULL;
   1470	sb->s_fs_info        = ffs;
   1471	sb->s_blocksize      = PAGE_SIZE;
   1472	sb->s_blocksize_bits = PAGE_SHIFT;
   1473	sb->s_magic          = FUNCTIONFS_MAGIC;
   1474	sb->s_op             = &ffs_sb_operations;
   1475	sb->s_time_gran      = 1;
   1476
   1477	/* Root inode */
   1478	data->perms.mode = data->root_mode;
   1479	inode = ffs_sb_make_inode(sb, NULL,
   1480				  &simple_dir_operations,
   1481				  &simple_dir_inode_operations,
   1482				  &data->perms);
   1483	sb->s_root = d_make_root(inode);
   1484	if (!sb->s_root)
   1485		return -ENOMEM;
   1486
   1487	/* EP0 file */
   1488	if (!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations))
   1489		return -ENOMEM;
   1490
   1491	return 0;
   1492}
   1493
   1494enum {
   1495	Opt_no_disconnect,
   1496	Opt_rmode,
   1497	Opt_fmode,
   1498	Opt_mode,
   1499	Opt_uid,
   1500	Opt_gid,
   1501};
   1502
   1503static const struct fs_parameter_spec ffs_fs_fs_parameters[] = {
   1504	fsparam_bool	("no_disconnect",	Opt_no_disconnect),
   1505	fsparam_u32	("rmode",		Opt_rmode),
   1506	fsparam_u32	("fmode",		Opt_fmode),
   1507	fsparam_u32	("mode",		Opt_mode),
   1508	fsparam_u32	("uid",			Opt_uid),
   1509	fsparam_u32	("gid",			Opt_gid),
   1510	{}
   1511};
   1512
   1513static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
   1514{
   1515	struct ffs_sb_fill_data *data = fc->fs_private;
   1516	struct fs_parse_result result;
   1517	int opt;
   1518
   1519	ENTER();
   1520
   1521	opt = fs_parse(fc, ffs_fs_fs_parameters, param, &result);
   1522	if (opt < 0)
   1523		return opt;
   1524
   1525	switch (opt) {
   1526	case Opt_no_disconnect:
   1527		data->no_disconnect = result.boolean;
   1528		break;
   1529	case Opt_rmode:
   1530		data->root_mode  = (result.uint_32 & 0555) | S_IFDIR;
   1531		break;
   1532	case Opt_fmode:
   1533		data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
   1534		break;
   1535	case Opt_mode:
   1536		data->root_mode  = (result.uint_32 & 0555) | S_IFDIR;
   1537		data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
   1538		break;
   1539
   1540	case Opt_uid:
   1541		data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
   1542		if (!uid_valid(data->perms.uid))
   1543			goto unmapped_value;
   1544		break;
   1545	case Opt_gid:
   1546		data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
   1547		if (!gid_valid(data->perms.gid))
   1548			goto unmapped_value;
   1549		break;
   1550
   1551	default:
   1552		return -ENOPARAM;
   1553	}
   1554
   1555	return 0;
   1556
   1557unmapped_value:
   1558	return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
   1559}
   1560
   1561/*
   1562 * Set up the superblock for a mount.
   1563 */
   1564static int ffs_fs_get_tree(struct fs_context *fc)
   1565{
   1566	struct ffs_sb_fill_data *ctx = fc->fs_private;
   1567	struct ffs_data	*ffs;
   1568	int ret;
   1569
   1570	ENTER();
   1571
   1572	if (!fc->source)
   1573		return invalf(fc, "No source specified");
   1574
   1575	ffs = ffs_data_new(fc->source);
   1576	if (!ffs)
   1577		return -ENOMEM;
   1578	ffs->file_perms = ctx->perms;
   1579	ffs->no_disconnect = ctx->no_disconnect;
   1580
   1581	ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
   1582	if (!ffs->dev_name) {
   1583		ffs_data_put(ffs);
   1584		return -ENOMEM;
   1585	}
   1586
   1587	ret = ffs_acquire_dev(ffs->dev_name, ffs);
   1588	if (ret) {
   1589		ffs_data_put(ffs);
   1590		return ret;
   1591	}
   1592
   1593	ctx->ffs_data = ffs;
   1594	return get_tree_nodev(fc, ffs_sb_fill);
   1595}
   1596
   1597static void ffs_fs_free_fc(struct fs_context *fc)
   1598{
   1599	struct ffs_sb_fill_data *ctx = fc->fs_private;
   1600
   1601	if (ctx) {
   1602		if (ctx->ffs_data) {
   1603			ffs_data_put(ctx->ffs_data);
   1604		}
   1605
   1606		kfree(ctx);
   1607	}
   1608}
   1609
   1610static const struct fs_context_operations ffs_fs_context_ops = {
   1611	.free		= ffs_fs_free_fc,
   1612	.parse_param	= ffs_fs_parse_param,
   1613	.get_tree	= ffs_fs_get_tree,
   1614};
   1615
   1616static int ffs_fs_init_fs_context(struct fs_context *fc)
   1617{
   1618	struct ffs_sb_fill_data *ctx;
   1619
   1620	ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
   1621	if (!ctx)
   1622		return -ENOMEM;
   1623
   1624	ctx->perms.mode = S_IFREG | 0600;
   1625	ctx->perms.uid = GLOBAL_ROOT_UID;
   1626	ctx->perms.gid = GLOBAL_ROOT_GID;
   1627	ctx->root_mode = S_IFDIR | 0500;
   1628	ctx->no_disconnect = false;
   1629
   1630	fc->fs_private = ctx;
   1631	fc->ops = &ffs_fs_context_ops;
   1632	return 0;
   1633}
   1634
   1635static void
   1636ffs_fs_kill_sb(struct super_block *sb)
   1637{
   1638	ENTER();
   1639
   1640	kill_litter_super(sb);
   1641	if (sb->s_fs_info)
   1642		ffs_data_closed(sb->s_fs_info);
   1643}
   1644
   1645static struct file_system_type ffs_fs_type = {
   1646	.owner		= THIS_MODULE,
   1647	.name		= "functionfs",
   1648	.init_fs_context = ffs_fs_init_fs_context,
   1649	.parameters	= ffs_fs_fs_parameters,
   1650	.kill_sb	= ffs_fs_kill_sb,
   1651};
   1652MODULE_ALIAS_FS("functionfs");
   1653
   1654
   1655/* Driver's main init/cleanup functions *************************************/
   1656
   1657static int functionfs_init(void)
   1658{
   1659	int ret;
   1660
   1661	ENTER();
   1662
   1663	ret = register_filesystem(&ffs_fs_type);
   1664	if (!ret)
   1665		pr_info("file system registered\n");
   1666	else
   1667		pr_err("failed registering file system (%d)\n", ret);
   1668
   1669	return ret;
   1670}
   1671
   1672static void functionfs_cleanup(void)
   1673{
   1674	ENTER();
   1675
   1676	pr_info("unloading\n");
   1677	unregister_filesystem(&ffs_fs_type);
   1678}
   1679
   1680
   1681/* ffs_data and ffs_function construction and destruction code **************/
   1682
   1683static void ffs_data_clear(struct ffs_data *ffs);
   1684static void ffs_data_reset(struct ffs_data *ffs);
   1685
   1686static void ffs_data_get(struct ffs_data *ffs)
   1687{
   1688	ENTER();
   1689
   1690	refcount_inc(&ffs->ref);
   1691}
   1692
   1693static void ffs_data_opened(struct ffs_data *ffs)
   1694{
   1695	ENTER();
   1696
   1697	refcount_inc(&ffs->ref);
   1698	if (atomic_add_return(1, &ffs->opened) == 1 &&
   1699			ffs->state == FFS_DEACTIVATED) {
   1700		ffs->state = FFS_CLOSING;
   1701		ffs_data_reset(ffs);
   1702	}
   1703}
   1704
   1705static void ffs_data_put(struct ffs_data *ffs)
   1706{
   1707	ENTER();
   1708
   1709	if (refcount_dec_and_test(&ffs->ref)) {
   1710		pr_info("%s(): freeing\n", __func__);
   1711		ffs_data_clear(ffs);
   1712		ffs_release_dev(ffs->private_data);
   1713		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
   1714		       swait_active(&ffs->ep0req_completion.wait) ||
   1715		       waitqueue_active(&ffs->wait));
   1716		destroy_workqueue(ffs->io_completion_wq);
   1717		kfree(ffs->dev_name);
   1718		kfree(ffs);
   1719	}
   1720}
   1721
   1722static void ffs_data_closed(struct ffs_data *ffs)
   1723{
   1724	struct ffs_epfile *epfiles;
   1725	unsigned long flags;
   1726
   1727	ENTER();
   1728
   1729	if (atomic_dec_and_test(&ffs->opened)) {
   1730		if (ffs->no_disconnect) {
   1731			ffs->state = FFS_DEACTIVATED;
   1732			spin_lock_irqsave(&ffs->eps_lock, flags);
   1733			epfiles = ffs->epfiles;
   1734			ffs->epfiles = NULL;
   1735			spin_unlock_irqrestore(&ffs->eps_lock,
   1736							flags);
   1737
   1738			if (epfiles)
   1739				ffs_epfiles_destroy(epfiles,
   1740						 ffs->eps_count);
   1741
   1742			if (ffs->setup_state == FFS_SETUP_PENDING)
   1743				__ffs_ep0_stall(ffs);
   1744		} else {
   1745			ffs->state = FFS_CLOSING;
   1746			ffs_data_reset(ffs);
   1747		}
   1748	}
   1749	if (atomic_read(&ffs->opened) < 0) {
   1750		ffs->state = FFS_CLOSING;
   1751		ffs_data_reset(ffs);
   1752	}
   1753
   1754	ffs_data_put(ffs);
   1755}
   1756
   1757static struct ffs_data *ffs_data_new(const char *dev_name)
   1758{
   1759	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
   1760	if (!ffs)
   1761		return NULL;
   1762
   1763	ENTER();
   1764
   1765	ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
   1766	if (!ffs->io_completion_wq) {
   1767		kfree(ffs);
   1768		return NULL;
   1769	}
   1770
   1771	refcount_set(&ffs->ref, 1);
   1772	atomic_set(&ffs->opened, 0);
   1773	ffs->state = FFS_READ_DESCRIPTORS;
   1774	mutex_init(&ffs->mutex);
   1775	spin_lock_init(&ffs->eps_lock);
   1776	init_waitqueue_head(&ffs->ev.waitq);
   1777	init_waitqueue_head(&ffs->wait);
   1778	init_completion(&ffs->ep0req_completion);
   1779
   1780	/* XXX REVISIT need to update it in some places, or do we? */
   1781	ffs->ev.can_stall = 1;
   1782
   1783	return ffs;
   1784}
   1785
   1786static void ffs_data_clear(struct ffs_data *ffs)
   1787{
   1788	struct ffs_epfile *epfiles;
   1789	unsigned long flags;
   1790
   1791	ENTER();
   1792
   1793	ffs_closed(ffs);
   1794
   1795	BUG_ON(ffs->gadget);
   1796
   1797	spin_lock_irqsave(&ffs->eps_lock, flags);
   1798	epfiles = ffs->epfiles;
   1799	ffs->epfiles = NULL;
   1800	spin_unlock_irqrestore(&ffs->eps_lock, flags);
   1801
   1802	/*
    1803	 * A race is possible between ffs_func_eps_disable and
    1804	 * ffs_epfile_release, therefore maintaining a local
    1805	 * copy of epfiles saves us from a use-after-free.
   1806	 */
   1807	if (epfiles) {
   1808		ffs_epfiles_destroy(epfiles, ffs->eps_count);
   1809		ffs->epfiles = NULL;
   1810	}
   1811
   1812	if (ffs->ffs_eventfd) {
   1813		eventfd_ctx_put(ffs->ffs_eventfd);
   1814		ffs->ffs_eventfd = NULL;
   1815	}
   1816
   1817	kfree(ffs->raw_descs_data);
   1818	kfree(ffs->raw_strings);
   1819	kfree(ffs->stringtabs);
   1820}
   1821
   1822static void ffs_data_reset(struct ffs_data *ffs)
   1823{
   1824	ENTER();
   1825
   1826	ffs_data_clear(ffs);
   1827
   1828	ffs->raw_descs_data = NULL;
   1829	ffs->raw_descs = NULL;
   1830	ffs->raw_strings = NULL;
   1831	ffs->stringtabs = NULL;
   1832
   1833	ffs->raw_descs_length = 0;
   1834	ffs->fs_descs_count = 0;
   1835	ffs->hs_descs_count = 0;
   1836	ffs->ss_descs_count = 0;
   1837
   1838	ffs->strings_count = 0;
   1839	ffs->interfaces_count = 0;
   1840	ffs->eps_count = 0;
   1841
   1842	ffs->ev.count = 0;
   1843
   1844	ffs->state = FFS_READ_DESCRIPTORS;
   1845	ffs->setup_state = FFS_NO_SETUP;
   1846	ffs->flags = 0;
   1847
   1848	ffs->ms_os_descs_ext_prop_count = 0;
   1849	ffs->ms_os_descs_ext_prop_name_len = 0;
   1850	ffs->ms_os_descs_ext_prop_data_len = 0;
   1851}
   1852
   1853
   1854static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
   1855{
   1856	struct usb_gadget_strings **lang;
   1857	int first_id;
   1858
   1859	ENTER();
   1860
   1861	if (WARN_ON(ffs->state != FFS_ACTIVE
   1862		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
   1863		return -EBADFD;
   1864
   1865	first_id = usb_string_ids_n(cdev, ffs->strings_count);
   1866	if (first_id < 0)
   1867		return first_id;
   1868
   1869	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
   1870	if (!ffs->ep0req)
   1871		return -ENOMEM;
   1872	ffs->ep0req->complete = ffs_ep0_complete;
   1873	ffs->ep0req->context = ffs;
   1874
   1875	lang = ffs->stringtabs;
   1876	if (lang) {
   1877		for (; *lang; ++lang) {
   1878			struct usb_string *str = (*lang)->strings;
   1879			int id = first_id;
   1880			for (; str->s; ++id, ++str)
   1881				str->id = id;
   1882		}
   1883	}
   1884
   1885	ffs->gadget = cdev->gadget;
   1886	ffs_data_get(ffs);
   1887	return 0;
   1888}
   1889
   1890static void functionfs_unbind(struct ffs_data *ffs)
   1891{
   1892	ENTER();
   1893
   1894	if (!WARN_ON(!ffs->gadget)) {
   1895		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
   1896		ffs->ep0req = NULL;
   1897		ffs->gadget = NULL;
   1898		clear_bit(FFS_FL_BOUND, &ffs->flags);
   1899		ffs_data_put(ffs);
   1900	}
   1901}
   1902
   1903static int ffs_epfiles_create(struct ffs_data *ffs)
   1904{
   1905	struct ffs_epfile *epfile, *epfiles;
   1906	unsigned i, count;
   1907
   1908	ENTER();
   1909
   1910	count = ffs->eps_count;
   1911	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
   1912	if (!epfiles)
   1913		return -ENOMEM;
   1914
   1915	epfile = epfiles;
   1916	for (i = 1; i <= count; ++i, ++epfile) {
   1917		epfile->ffs = ffs;
   1918		mutex_init(&epfile->mutex);
   1919		if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
   1920			sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
   1921		else
   1922			sprintf(epfile->name, "ep%u", i);
   1923		epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
   1924						 epfile,
   1925						 &ffs_epfile_operations);
   1926		if (!epfile->dentry) {
   1927			ffs_epfiles_destroy(epfiles, i - 1);
   1928			return -ENOMEM;
   1929		}
   1930	}
   1931
   1932	ffs->epfiles = epfiles;
   1933	return 0;
   1934}
   1935
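/*
 * For illustration: with, say, eps_count == 2 and descriptors declaring
 * endpoints 0x81 (IN) and 0x01 (OUT) (example addresses), the files created
 * next to ep0 in the functionfs mount are named
 *
 *	ep1 ep2			with the default "ep%u" naming, or
 *	ep81 ep01		with FUNCTIONFS_VIRTUAL_ADDR, taken from
 *				the addresses recorded in eps_addrmap[]
 */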
   1936static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
   1937{
   1938	struct ffs_epfile *epfile = epfiles;
   1939
   1940	ENTER();
   1941
   1942	for (; count; --count, ++epfile) {
   1943		BUG_ON(mutex_is_locked(&epfile->mutex));
   1944		if (epfile->dentry) {
   1945			d_delete(epfile->dentry);
   1946			dput(epfile->dentry);
   1947			epfile->dentry = NULL;
   1948		}
   1949	}
   1950
   1951	kfree(epfiles);
   1952}
   1953
   1954static void ffs_func_eps_disable(struct ffs_function *func)
   1955{
   1956	struct ffs_ep *ep;
   1957	struct ffs_epfile *epfile;
   1958	unsigned short count;
   1959	unsigned long flags;
   1960
   1961	spin_lock_irqsave(&func->ffs->eps_lock, flags);
   1962	count = func->ffs->eps_count;
   1963	epfile = func->ffs->epfiles;
   1964	ep = func->eps;
   1965	while (count--) {
   1966		/* pending requests get nuked */
   1967		if (ep->ep)
   1968			usb_ep_disable(ep->ep);
   1969		++ep;
   1970
   1971		if (epfile) {
   1972			epfile->ep = NULL;
   1973			__ffs_epfile_read_buffer_free(epfile);
   1974			++epfile;
   1975		}
   1976	}
   1977	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
   1978}
   1979
   1980static int ffs_func_eps_enable(struct ffs_function *func)
   1981{
   1982	struct ffs_data *ffs;
   1983	struct ffs_ep *ep;
   1984	struct ffs_epfile *epfile;
   1985	unsigned short count;
   1986	unsigned long flags;
   1987	int ret = 0;
   1988
   1989	spin_lock_irqsave(&func->ffs->eps_lock, flags);
   1990	ffs = func->ffs;
   1991	ep = func->eps;
   1992	epfile = ffs->epfiles;
   1993	count = ffs->eps_count;
   1994	while (count--) {
   1995		ep->ep->driver_data = ep;
   1996
   1997		ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
   1998		if (ret) {
   1999			pr_err("%s: config_ep_by_speed(%s) returned %d\n",
   2000					__func__, ep->ep->name, ret);
   2001			break;
   2002		}
   2003
   2004		ret = usb_ep_enable(ep->ep);
   2005		if (!ret) {
   2006			epfile->ep = ep;
   2007			epfile->in = usb_endpoint_dir_in(ep->ep->desc);
   2008			epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
   2009		} else {
   2010			break;
   2011		}
   2012
   2013		++ep;
   2014		++epfile;
   2015	}
   2016
   2017	wake_up_interruptible(&ffs->wait);
   2018	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
   2019
   2020	return ret;
   2021}
   2022
   2023
   2024/* Parsing and building descriptors and strings *****************************/
   2025
   2026/*
   2027 * This validates if data pointed by data is a valid USB descriptor as
   2028 * well as record how many interfaces, endpoints and strings are
   2029 * required by given configuration.  Returns address after the
   2030 * descriptor or NULL if data is invalid.
   2031 */
   2032
   2033enum ffs_entity_type {
   2034	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
   2035};
   2036
   2037enum ffs_os_desc_type {
   2038	FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
   2039};
   2040
   2041typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
   2042				   u8 *valuep,
   2043				   struct usb_descriptor_header *desc,
   2044				   void *priv);
   2045
   2046typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
   2047				    struct usb_os_desc_header *h, void *data,
   2048				    unsigned len, void *priv);
   2049
   2050static int __must_check ffs_do_single_desc(char *data, unsigned len,
   2051					   ffs_entity_callback entity,
   2052					   void *priv, int *current_class)
   2053{
   2054	struct usb_descriptor_header *_ds = (void *)data;
   2055	u8 length;
   2056	int ret;
   2057
   2058	ENTER();
   2059
   2060	/* At least two bytes are required: length and type */
   2061	if (len < 2) {
   2062		pr_vdebug("descriptor too short\n");
   2063		return -EINVAL;
   2064	}
   2065
   2066	/* Do we have at least as many bytes as the descriptor takes? */
   2067	length = _ds->bLength;
   2068	if (len < length) {
   2069		pr_vdebug("descriptor longer than available data\n");
   2070		return -EINVAL;
   2071	}
   2072
   2073#define __entity_check_INTERFACE(val)  1
   2074#define __entity_check_STRING(val)     (val)
   2075#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
   2076#define __entity(type, val) do {					\
   2077		pr_vdebug("entity " #type "(%02x)\n", (val));		\
   2078		if (!__entity_check_ ##type(val)) {			\
   2079			pr_vdebug("invalid entity's value\n");		\
   2080			return -EINVAL;					\
   2081		}							\
   2082		ret = entity(FFS_ ##type, &val, _ds, priv);		\
   2083		if (ret < 0) {						\
   2084			pr_debug("entity " #type "(%02x); ret = %d\n",	\
   2085				 (val), ret);				\
   2086			return ret;					\
   2087		}							\
   2088	} while (0)
   2089
   2090	/* Parse descriptor depending on type. */
   2091	switch (_ds->bDescriptorType) {
   2092	case USB_DT_DEVICE:
   2093	case USB_DT_CONFIG:
   2094	case USB_DT_STRING:
   2095	case USB_DT_DEVICE_QUALIFIER:
   2096		/* function can't have any of those */
   2097		pr_vdebug("descriptor reserved for gadget: %d\n",
   2098		      _ds->bDescriptorType);
   2099		return -EINVAL;
   2100
   2101	case USB_DT_INTERFACE: {
   2102		struct usb_interface_descriptor *ds = (void *)_ds;
   2103		pr_vdebug("interface descriptor\n");
   2104		if (length != sizeof *ds)
   2105			goto inv_length;
   2106
   2107		__entity(INTERFACE, ds->bInterfaceNumber);
   2108		if (ds->iInterface)
   2109			__entity(STRING, ds->iInterface);
   2110		*current_class = ds->bInterfaceClass;
   2111	}
   2112		break;
   2113
   2114	case USB_DT_ENDPOINT: {
   2115		struct usb_endpoint_descriptor *ds = (void *)_ds;
   2116		pr_vdebug("endpoint descriptor\n");
   2117		if (length != USB_DT_ENDPOINT_SIZE &&
   2118		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
   2119			goto inv_length;
   2120		__entity(ENDPOINT, ds->bEndpointAddress);
   2121	}
   2122		break;
   2123
   2124	case USB_TYPE_CLASS | 0x01:
   2125		if (*current_class == USB_INTERFACE_CLASS_HID) {
   2126			pr_vdebug("hid descriptor\n");
   2127			if (length != sizeof(struct hid_descriptor))
   2128				goto inv_length;
   2129			break;
   2130		} else if (*current_class == USB_INTERFACE_CLASS_CCID) {
   2131			pr_vdebug("ccid descriptor\n");
   2132			if (length != sizeof(struct ccid_descriptor))
   2133				goto inv_length;
   2134			break;
   2135		} else {
   2136			pr_vdebug("unknown descriptor: %d for class %d\n",
   2137			      _ds->bDescriptorType, *current_class);
   2138			return -EINVAL;
   2139		}
   2140
   2141	case USB_DT_OTG:
   2142		if (length != sizeof(struct usb_otg_descriptor))
   2143			goto inv_length;
   2144		break;
   2145
   2146	case USB_DT_INTERFACE_ASSOCIATION: {
   2147		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
   2148		pr_vdebug("interface association descriptor\n");
   2149		if (length != sizeof *ds)
   2150			goto inv_length;
   2151		if (ds->iFunction)
   2152			__entity(STRING, ds->iFunction);
   2153	}
   2154		break;
   2155
   2156	case USB_DT_SS_ENDPOINT_COMP:
   2157		pr_vdebug("EP SS companion descriptor\n");
   2158		if (length != sizeof(struct usb_ss_ep_comp_descriptor))
   2159			goto inv_length;
   2160		break;
   2161
   2162	case USB_DT_OTHER_SPEED_CONFIG:
   2163	case USB_DT_INTERFACE_POWER:
   2164	case USB_DT_DEBUG:
   2165	case USB_DT_SECURITY:
   2166	case USB_DT_CS_RADIO_CONTROL:
   2167		/* TODO */
   2168		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
   2169		return -EINVAL;
   2170
   2171	default:
   2172		/* We should never be here */
   2173		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
   2174		return -EINVAL;
   2175
   2176inv_length:
   2177		pr_vdebug("invalid length: %d (descriptor %d)\n",
   2178			  _ds->bLength, _ds->bDescriptorType);
   2179		return -EINVAL;
   2180	}
   2181
   2182#undef __entity
   2183#undef __entity_check_DESCRIPTOR
   2184#undef __entity_check_INTERFACE
   2185#undef __entity_check_STRING
   2186#undef __entity_check_ENDPOINT
   2187
   2188	return length;
   2189}
   2190
   2191static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
   2192				     ffs_entity_callback entity, void *priv)
   2193{
   2194	const unsigned _len = len;
   2195	unsigned long num = 0;
   2196	int current_class = -1;
   2197
   2198	ENTER();
   2199
   2200	for (;;) {
   2201		int ret;
   2202
   2203		if (num == count)
   2204			data = NULL;
   2205
   2206		/* Record "descriptor" entity */
   2207		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
   2208		if (ret < 0) {
   2209			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
   2210				 num, ret);
   2211			return ret;
   2212		}
   2213
   2214		if (!data)
   2215			return _len - len;
   2216
   2217		ret = ffs_do_single_desc(data, len, entity, priv,
   2218			&current_class);
   2219		if (ret < 0) {
   2220			pr_debug("%s returns %d\n", __func__, ret);
   2221			return ret;
   2222		}
   2223
   2224		len -= ret;
   2225		data += ret;
   2226		++num;
   2227	}
   2228}
   2229
   2230static int __ffs_data_do_entity(enum ffs_entity_type type,
   2231				u8 *valuep, struct usb_descriptor_header *desc,
   2232				void *priv)
   2233{
   2234	struct ffs_desc_helper *helper = priv;
   2235	struct usb_endpoint_descriptor *d;
   2236
   2237	ENTER();
   2238
   2239	switch (type) {
   2240	case FFS_DESCRIPTOR:
   2241		break;
   2242
   2243	case FFS_INTERFACE:
   2244		/*
   2245		 * Interfaces are indexed from zero so if we
   2246		 * encountered interface "n" then there are at least
   2247		 * "n+1" interfaces.
   2248		 */
   2249		if (*valuep >= helper->interfaces_count)
   2250			helper->interfaces_count = *valuep + 1;
   2251		break;
   2252
   2253	case FFS_STRING:
   2254		/*
   2255		 * Strings are indexed from 1 (0 is reserved
   2256		 * for languages list)
   2257		 */
   2258		if (*valuep > helper->ffs->strings_count)
   2259			helper->ffs->strings_count = *valuep;
   2260		break;
   2261
   2262	case FFS_ENDPOINT:
   2263		d = (void *)desc;
   2264		helper->eps_count++;
   2265		if (helper->eps_count >= FFS_MAX_EPS_COUNT)
   2266			return -EINVAL;
   2267		/* Check if descriptors for any speed were already parsed */
   2268		if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
   2269			helper->ffs->eps_addrmap[helper->eps_count] =
   2270				d->bEndpointAddress;
   2271		else if (helper->ffs->eps_addrmap[helper->eps_count] !=
   2272				d->bEndpointAddress)
   2273			return -EINVAL;
   2274		break;
   2275	}
   2276
   2277	return 0;
   2278}
   2279
   2280static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
   2281				   struct usb_os_desc_header *desc)
   2282{
   2283	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
   2284	u16 w_index = le16_to_cpu(desc->wIndex);
   2285
   2286	if (bcd_version != 1) {
   2287		pr_vdebug("unsupported os descriptors version: %d",
   2288			  bcd_version);
   2289		return -EINVAL;
   2290	}
   2291	switch (w_index) {
   2292	case 0x4:
   2293		*next_type = FFS_OS_DESC_EXT_COMPAT;
   2294		break;
   2295	case 0x5:
   2296		*next_type = FFS_OS_DESC_EXT_PROP;
   2297		break;
   2298	default:
   2299		pr_vdebug("unsupported os descriptor type: %d", w_index);
   2300		return -EINVAL;
   2301	}
   2302
   2303	return sizeof(*desc);
   2304}
   2305
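/*
 * For reference, the header parsed above is defined in
 * include/uapi/linux/usb/functionfs.h roughly as
 *
 *	struct usb_os_desc_header {
 *		__u8	interface;
 *		__le32	dwLength;
 *		__le16	bcdVersion;
 *		__le16	wIndex;
 *		union {
 *			struct {
 *				__u8	bCount;
 *				__u8	Reserved;
 *			};
 *			__le16	wCount;
 *		};
 *	} __attribute__((packed));
 *
 * with wIndex 0x4 selecting extended compatibility ID descriptors and
 * 0x5 selecting extended properties descriptors.
 */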
   2306/*
   2307 * Process all extended compatibility/extended property descriptors
   2308 * of a feature descriptor
   2309 */
   2310static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
   2311					      enum ffs_os_desc_type type,
   2312					      u16 feature_count,
   2313					      ffs_os_desc_callback entity,
   2314					      void *priv,
   2315					      struct usb_os_desc_header *h)
   2316{
   2317	int ret;
   2318	const unsigned _len = len;
   2319
   2320	ENTER();
   2321
   2322	/* loop over all ext compat/ext prop descriptors */
   2323	while (feature_count--) {
   2324		ret = entity(type, h, data, len, priv);
   2325		if (ret < 0) {
   2326			pr_debug("bad OS descriptor, type: %d\n", type);
   2327			return ret;
   2328		}
   2329		data += ret;
   2330		len -= ret;
   2331	}
   2332	return _len - len;
   2333}
   2334
   2335/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
   2336static int __must_check ffs_do_os_descs(unsigned count,
   2337					char *data, unsigned len,
   2338					ffs_os_desc_callback entity, void *priv)
   2339{
   2340	const unsigned _len = len;
   2341	unsigned long num = 0;
   2342
   2343	ENTER();
   2344
   2345	for (num = 0; num < count; ++num) {
   2346		int ret;
   2347		enum ffs_os_desc_type type;
   2348		u16 feature_count;
   2349		struct usb_os_desc_header *desc = (void *)data;
   2350
   2351		if (len < sizeof(*desc))
   2352			return -EINVAL;
   2353
   2354		/*
   2355		 * Record "descriptor" entity.
   2356		 * Process dwLength, bcdVersion and wIndex, and fetch b/wCount.
   2357		 * Then move the data pointer to the beginning of the extended
   2358		 * compatibility IDs or the extended properties portion of the
   2359		 * data.
   2360		 */
   2361		if (le32_to_cpu(desc->dwLength) > len)
   2362			return -EINVAL;
   2363
   2364		ret = __ffs_do_os_desc_header(&type, desc);
   2365		if (ret < 0) {
   2366			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
   2367				 num, ret);
   2368			return ret;
   2369		}
   2370		/*
   2371		 * bCount (8-bit) + zero Reserved byte reads as a LE 16-bit wCount
   2372		 */
   2373		feature_count = le16_to_cpu(desc->wCount);
   2374		if (type == FFS_OS_DESC_EXT_COMPAT &&
   2375		    (feature_count > 255 || desc->Reserved))
   2376				return -EINVAL;
   2377		len -= ret;
   2378		data += ret;
   2379
   2380		/*
   2381		 * Process all function/property descriptors
   2382		 * of this Feature Descriptor
   2383		 */
   2384		ret = ffs_do_single_os_desc(data, len, type,
   2385					    feature_count, entity, priv, desc);
   2386		if (ret < 0) {
   2387			pr_debug("%s returns %d\n", __func__, ret);
   2388			return ret;
   2389		}
   2390
   2391		len -= ret;
   2392		data += ret;
   2393	}
   2394	return _len - len;
   2395}
   2396
   2397/*
   2398 * Validate contents of the buffer from userspace related to OS descriptors.
   2399 */
   2400static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
   2401				 struct usb_os_desc_header *h, void *data,
   2402				 unsigned len, void *priv)
   2403{
   2404	struct ffs_data *ffs = priv;
   2405	u8 length;
   2406
   2407	ENTER();
   2408
   2409	switch (type) {
   2410	case FFS_OS_DESC_EXT_COMPAT: {
   2411		struct usb_ext_compat_desc *d = data;
   2412		int i;
   2413
   2414		if (len < sizeof(*d) ||
   2415		    d->bFirstInterfaceNumber >= ffs->interfaces_count)
   2416			return -EINVAL;
   2417		if (d->Reserved1 != 1) {
   2418			/*
   2419			 * According to the spec, Reserved1 must be set to 1
   2420			 * but older kernels incorrectly rejected non-zero
   2421			 * values.  We fix it here to avoid returning EINVAL
   2422			 * in response to values we used to accept.
   2423			 */
   2424			pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
   2425			d->Reserved1 = 1;
   2426		}
   2427		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
   2428			if (d->Reserved2[i])
   2429				return -EINVAL;
   2430
   2431		length = sizeof(struct usb_ext_compat_desc);
   2432	}
   2433		break;
   2434	case FFS_OS_DESC_EXT_PROP: {
   2435		struct usb_ext_prop_desc *d = data;
   2436		u32 type, pdl;
   2437		u16 pnl;
   2438
   2439		if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
   2440			return -EINVAL;
   2441		length = le32_to_cpu(d->dwSize);
   2442		if (len < length)
   2443			return -EINVAL;
   2444		type = le32_to_cpu(d->dwPropertyDataType);
   2445		if (type < USB_EXT_PROP_UNICODE ||
   2446		    type > USB_EXT_PROP_UNICODE_MULTI) {
   2447			pr_vdebug("unsupported os descriptor property type: %d",
   2448				  type);
   2449			return -EINVAL;
   2450		}
   2451		pnl = le16_to_cpu(d->wPropertyNameLength);
   2452		if (length < 14 + pnl) {
   2453			pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
   2454				  length, pnl, type);
   2455			return -EINVAL;
   2456		}
   2457		pdl = le32_to_cpu(*(__le32 *)((u8 *)data + 10 + pnl));
   2458		if (length != 14 + pnl + pdl) {
   2459			pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
   2460				  length, pnl, pdl, type);
   2461			return -EINVAL;
   2462		}
   2463		++ffs->ms_os_descs_ext_prop_count;
   2464		/* property name reported to the host as "WCHAR"s */
   2465		ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
   2466		ffs->ms_os_descs_ext_prop_data_len += pdl;
   2467	}
   2468		break;
   2469	default:
   2470		pr_vdebug("unknown descriptor: %d\n", type);
   2471		return -EINVAL;
   2472	}
   2473	return length;
   2474}
   2475
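/*
 * The size checks above follow the layout of a single extended property
 * ("custom property section"), which for reference is
 *
 *	offset 0		__le32	dwSize			(4 bytes)
 *	offset 4		__le32	dwPropertyDataType	(4 bytes)
 *	offset 8		__le16	wPropertyNameLength	(2 bytes, pnl)
 *	offset 10		property name			(pnl bytes)
 *	offset 10 + pnl		__le32	dwPropertyDataLength	(4 bytes, pdl)
 *	offset 14 + pnl		property data			(pdl bytes)
 *
 * hence the fixed overhead of 14 bytes and the pdl read at offset 10 + pnl.
 */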
   2476static int __ffs_data_got_descs(struct ffs_data *ffs,
   2477				char *const _data, size_t len)
   2478{
   2479	char *data = _data, *raw_descs;
   2480	unsigned os_descs_count = 0, counts[3], flags;
   2481	int ret = -EINVAL, i;
   2482	struct ffs_desc_helper helper;
   2483
   2484	ENTER();
   2485
   2486	if (get_unaligned_le32(data + 4) != len)
   2487		goto error;
   2488
   2489	switch (get_unaligned_le32(data)) {
   2490	case FUNCTIONFS_DESCRIPTORS_MAGIC:
   2491		flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
   2492		data += 8;
   2493		len  -= 8;
   2494		break;
   2495	case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
   2496		flags = get_unaligned_le32(data + 8);
   2497		ffs->user_flags = flags;
   2498		if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
   2499			      FUNCTIONFS_HAS_HS_DESC |
   2500			      FUNCTIONFS_HAS_SS_DESC |
   2501			      FUNCTIONFS_HAS_MS_OS_DESC |
   2502			      FUNCTIONFS_VIRTUAL_ADDR |
   2503			      FUNCTIONFS_EVENTFD |
   2504			      FUNCTIONFS_ALL_CTRL_RECIP |
   2505			      FUNCTIONFS_CONFIG0_SETUP)) {
   2506			ret = -ENOSYS;
   2507			goto error;
   2508		}
   2509		data += 12;
   2510		len  -= 12;
   2511		break;
   2512	default:
   2513		goto error;
   2514	}
   2515
   2516	if (flags & FUNCTIONFS_EVENTFD) {
   2517		if (len < 4)
   2518			goto error;
   2519		ffs->ffs_eventfd =
   2520			eventfd_ctx_fdget((int)get_unaligned_le32(data));
   2521		if (IS_ERR(ffs->ffs_eventfd)) {
   2522			ret = PTR_ERR(ffs->ffs_eventfd);
   2523			ffs->ffs_eventfd = NULL;
   2524			goto error;
   2525		}
   2526		data += 4;
   2527		len  -= 4;
   2528	}
   2529
   2530	/* Read fs_count, hs_count and ss_count (if present) */
   2531	for (i = 0; i < 3; ++i) {
   2532		if (!(flags & (1 << i))) {
   2533			counts[i] = 0;
   2534		} else if (len < 4) {
   2535			goto error;
   2536		} else {
   2537			counts[i] = get_unaligned_le32(data);
   2538			data += 4;
   2539			len  -= 4;
   2540		}
   2541	}
   2542	if (flags & (1 << i)) {
   2543		if (len < 4) {
   2544			goto error;
   2545		}
   2546		os_descs_count = get_unaligned_le32(data);
   2547		data += 4;
   2548		len -= 4;
   2549	}
   2550
   2551	/* Read descriptors */
   2552	raw_descs = data;
   2553	helper.ffs = ffs;
   2554	for (i = 0; i < 3; ++i) {
   2555		if (!counts[i])
   2556			continue;
   2557		helper.interfaces_count = 0;
   2558		helper.eps_count = 0;
   2559		ret = ffs_do_descs(counts[i], data, len,
   2560				   __ffs_data_do_entity, &helper);
   2561		if (ret < 0)
   2562			goto error;
   2563		if (!ffs->eps_count && !ffs->interfaces_count) {
   2564			ffs->eps_count = helper.eps_count;
   2565			ffs->interfaces_count = helper.interfaces_count;
   2566		} else {
   2567			if (ffs->eps_count != helper.eps_count) {
   2568				ret = -EINVAL;
   2569				goto error;
   2570			}
   2571			if (ffs->interfaces_count != helper.interfaces_count) {
   2572				ret = -EINVAL;
   2573				goto error;
   2574			}
   2575		}
   2576		data += ret;
   2577		len  -= ret;
   2578	}
   2579	if (os_descs_count) {
   2580		ret = ffs_do_os_descs(os_descs_count, data, len,
   2581				      __ffs_data_do_os_desc, ffs);
   2582		if (ret < 0)
   2583			goto error;
   2584		data += ret;
   2585		len -= ret;
   2586	}
   2587
   2588	if (raw_descs == data || len) {
   2589		ret = -EINVAL;
   2590		goto error;
   2591	}
   2592
   2593	ffs->raw_descs_data	= _data;
   2594	ffs->raw_descs		= raw_descs;
   2595	ffs->raw_descs_length	= data - raw_descs;
   2596	ffs->fs_descs_count	= counts[0];
   2597	ffs->hs_descs_count	= counts[1];
   2598	ffs->ss_descs_count	= counts[2];
   2599	ffs->ms_os_descs_count	= os_descs_count;
   2600
   2601	return 0;
   2602
   2603error:
   2604	kfree(_data);
   2605	return ret;
   2606}
   2607
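/*
 * A minimal userspace sketch of the blob parsed above, written to the ep0
 * file while the instance is in FFS_READ_DESCRIPTORS state (modelled on
 * tools/usb/ffs-test.c; the descriptor contents and the htole32() helpers
 * from <endian.h> are illustrative assumptions):
 *
 *	static const struct {
 *		struct usb_functionfs_descs_head_v2 header;
 *		__le32 fs_count, hs_count;
 *		struct {
 *			struct usb_interface_descriptor intf;
 *			struct usb_endpoint_descriptor_no_audio sink, source;
 *		} __attribute__((packed)) fs_descs, hs_descs;
 *	} __attribute__((packed)) descriptors = {
 *		.header = {
 *			.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
 *			.flags  = htole32(FUNCTIONFS_HAS_FS_DESC |
 *					  FUNCTIONFS_HAS_HS_DESC),
 *			.length = htole32(sizeof(descriptors)),
 *		},
 *		.fs_count = htole32(3),
 *		.hs_count = htole32(3),
 *		.fs_descs = { ... },	.hs_descs = { ... },
 *	};
 *
 *	write(ep0_fd, &descriptors, sizeof(descriptors));
 */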
   2608static int __ffs_data_got_strings(struct ffs_data *ffs,
   2609				  char *const _data, size_t len)
   2610{
   2611	u32 str_count, needed_count, lang_count;
   2612	struct usb_gadget_strings **stringtabs, *t;
   2613	const char *data = _data;
   2614	struct usb_string *s;
   2615
   2616	ENTER();
   2617
   2618	if (len < 16 ||
   2619	    get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
   2620	    get_unaligned_le32(data + 4) != len)
   2621		goto error;
   2622	str_count  = get_unaligned_le32(data + 8);
   2623	lang_count = get_unaligned_le32(data + 12);
   2624
   2625	/* if one is zero the other must be zero */
   2626	if (!str_count != !lang_count)
   2627		goto error;
   2628
   2629	/* Do we have at least as many strings as descriptors need? */
   2630	needed_count = ffs->strings_count;
   2631	if (str_count < needed_count)
   2632		goto error;
   2633
   2634	/*
   2635	 * If we don't need any strings just return and free all
   2636	 * memory.
   2637	 */
   2638	if (!needed_count) {
   2639		kfree(_data);
   2640		return 0;
   2641	}
   2642
   2643	/* Allocate everything in one chunk so there's less maintenance. */
   2644	{
   2645		unsigned i = 0;
   2646		vla_group(d);
   2647		vla_item(d, struct usb_gadget_strings *, stringtabs,
   2648			lang_count + 1);
   2649		vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
   2650		vla_item(d, struct usb_string, strings,
   2651			lang_count*(needed_count+1));
   2652
   2653		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
   2654
   2655		if (!vlabuf) {
   2656			kfree(_data);
   2657			return -ENOMEM;
   2658		}
   2659
   2660		/* Initialize the VLA pointers */
   2661		stringtabs = vla_ptr(vlabuf, d, stringtabs);
   2662		t = vla_ptr(vlabuf, d, stringtab);
   2663		i = lang_count;
   2664		do {
   2665			*stringtabs++ = t++;
   2666		} while (--i);
   2667		*stringtabs = NULL;
   2668
   2669		/* stringtabs = vlabuf = d_stringtabs for later kfree */
   2670		stringtabs = vla_ptr(vlabuf, d, stringtabs);
   2671		t = vla_ptr(vlabuf, d, stringtab);
   2672		s = vla_ptr(vlabuf, d, strings);
   2673	}
   2674
   2675	/* For each language */
   2676	data += 16;
   2677	len -= 16;
   2678
   2679	do { /* lang_count > 0 so we can use do-while */
   2680		unsigned needed = needed_count;
   2681		u32 str_per_lang = str_count;
   2682
   2683		if (len < 3)
   2684			goto error_free;
   2685		t->language = get_unaligned_le16(data);
   2686		t->strings  = s;
   2687		++t;
   2688
   2689		data += 2;
   2690		len -= 2;
   2691
   2692		/* For each string */
   2693		do { /* str_count > 0 so we can use do-while */
   2694			size_t length = strnlen(data, len);
   2695
   2696			if (length == len)
   2697				goto error_free;
   2698
   2699			/*
   2700			 * The user may provide more strings than we need;
   2701			 * if that's the case we simply ignore the
   2702			 * rest.
   2703			 */
   2704			if (needed) {
   2705				/*
   2706				 * s->id will be set while adding
   2707				 * function to configuration so for
   2708				 * now just leave garbage here.
   2709				 */
   2710				s->s = data;
   2711				--needed;
   2712				++s;
   2713			}
   2714
   2715			data += length + 1;
   2716			len -= length + 1;
   2717		} while (--str_per_lang);
   2718
   2719		s->id = 0;   /* terminator */
   2720		s->s = NULL;
   2721		++s;
   2722
   2723	} while (--lang_count);
   2724
   2725	/* Some garbage left? */
   2726	if (len)
   2727		goto error_free;
   2728
   2729	/* Done! */
   2730	ffs->stringtabs = stringtabs;
   2731	ffs->raw_strings = _data;
   2732
   2733	return 0;
   2734
   2735error_free:
   2736	kfree(stringtabs);
   2737error:
   2738	kfree(_data);
   2739	return -EINVAL;
   2740}
   2741
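/*
 * The corresponding userspace sketch for the strings blob parsed above
 * (again modelled on tools/usb/ffs-test.c; the single en-US string is an
 * illustrative assumption):
 *
 *	static const struct {
 *		struct usb_functionfs_strings_head header;
 *		struct {
 *			__le16 code;
 *			const char str1[sizeof("My function")];
 *		} __attribute__((packed)) lang0;
 *	} __attribute__((packed)) strings = {
 *		.header = {
 *			.magic      = htole32(FUNCTIONFS_STRINGS_MAGIC),
 *			.length     = htole32(sizeof(strings)),
 *			.str_count  = htole32(1),
 *			.lang_count = htole32(1),
 *		},
 *		.lang0 = { htole16(0x0409), "My function" },
 *	};
 *
 *	write(ep0_fd, &strings, sizeof(strings));
 */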
   2742
   2743/* Events handling and management *******************************************/
   2744
   2745static void __ffs_event_add(struct ffs_data *ffs,
   2746			    enum usb_functionfs_event_type type)
   2747{
   2748	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
   2749	int neg = 0;
   2750
   2751	/*
   2752	 * Abort any unhandled setup.
   2753	 *
   2754	 * We do not need to worry about a cmpxchg() changing the value
   2755	 * of ffs->setup_state without holding the lock, because while
   2756	 * the state is FFS_SETUP_PENDING the cmpxchg() calls elsewhere
   2757	 * in this file do nothing.
   2758	 */
   2759	if (ffs->setup_state == FFS_SETUP_PENDING)
   2760		ffs->setup_state = FFS_SETUP_CANCELLED;
   2761
   2762	/*
   2763	 * The logic of this function guarantees that there are at most four
   2764	 * pending events on the ffs->ev.types queue.  This is important because
   2765	 * the queue has space for four elements only and __ffs_ep0_read_events
   2766	 * depends on that limit as well.  If more event types are added, those
   2767	 * limits have to be revisited or guaranteed to still hold.
   2768	 */
   2769	switch (type) {
   2770	case FUNCTIONFS_RESUME:
   2771		rem_type2 = FUNCTIONFS_SUSPEND;
   2772		fallthrough;
   2773	case FUNCTIONFS_SUSPEND:
   2774	case FUNCTIONFS_SETUP:
   2775		rem_type1 = type;
   2776		/* Discard all similar events */
   2777		break;
   2778
   2779	case FUNCTIONFS_BIND:
   2780	case FUNCTIONFS_UNBIND:
   2781	case FUNCTIONFS_DISABLE:
   2782	case FUNCTIONFS_ENABLE:
   2783		/* Discard everything other than power management. */
   2784		rem_type1 = FUNCTIONFS_SUSPEND;
   2785		rem_type2 = FUNCTIONFS_RESUME;
   2786		neg = 1;
   2787		break;
   2788
   2789	default:
   2790		WARN(1, "%d: unknown event, this should not happen\n", type);
   2791		return;
   2792	}
   2793
   2794	{
   2795		u8 *ev  = ffs->ev.types, *out = ev;
   2796		unsigned n = ffs->ev.count;
   2797		for (; n; --n, ++ev)
   2798			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
   2799				*out++ = *ev;
   2800			else
   2801				pr_vdebug("purging event %d\n", *ev);
   2802		ffs->ev.count = out - ffs->ev.types;
   2803	}
   2804
   2805	pr_vdebug("adding event %d\n", type);
   2806	ffs->ev.types[ffs->ev.count++] = type;
   2807	wake_up_locked(&ffs->ev.waitq);
   2808	if (ffs->ffs_eventfd)
   2809		eventfd_signal(ffs->ffs_eventfd, 1);
   2810}
   2811
   2812static void ffs_event_add(struct ffs_data *ffs,
   2813			  enum usb_functionfs_event_type type)
   2814{
   2815	unsigned long flags;
   2816	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
   2817	__ffs_event_add(ffs, type);
   2818	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
   2819}
   2820
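/*
 * Userspace consumes the queued events by read()ing the ep0 file; a single
 * read() may return several struct usb_functionfs_event records (at most
 * the four that can be queued here).  A rough sketch of the daemon loop:
 *
 *	struct usb_functionfs_event event[4];
 *	int i, n;
 *
 *	n = read(ep0_fd, event, sizeof(event)) / sizeof(*event);
 *	for (i = 0; i < n; ++i) {
 *		switch (event[i].type) {
 *		case FUNCTIONFS_ENABLE:
 *			... start I/O on the endpoint files ...
 *			break;
 *		case FUNCTIONFS_DISABLE:
 *		case FUNCTIONFS_SUSPEND:
 *			... quiesce ...
 *			break;
 *		case FUNCTIONFS_SETUP:
 *			... event[i].u.setup holds the usb_ctrlrequest;
 *			    answer with read()/write() on ep0 ...
 *			break;
 *		}
 *	}
 */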
   2821/* Bind/unbind USB function hooks *******************************************/
   2822
   2823static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
   2824{
   2825	int i;
   2826
   2827	for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
   2828		if (ffs->eps_addrmap[i] == endpoint_address)
   2829			return i;
   2830	return -ENOENT;
   2831}
   2832
   2833static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
   2834				    struct usb_descriptor_header *desc,
   2835				    void *priv)
   2836{
   2837	struct usb_endpoint_descriptor *ds = (void *)desc;
   2838	struct ffs_function *func = priv;
   2839	struct ffs_ep *ffs_ep;
   2840	unsigned ep_desc_id;
   2841	int idx;
   2842	static const char *speed_names[] = { "full", "high", "super" };
   2843
   2844	if (type != FFS_DESCRIPTOR)
   2845		return 0;
   2846
   2847	/*
   2848	 * If ss_descriptors is not NULL, we are reading super speed
   2849	 * descriptors; if hs_descriptors is not NULL, we are reading high
   2850	 * speed descriptors; otherwise, we are reading full speed
   2851	 * descriptors.
   2852	 */
   2853	if (func->function.ss_descriptors) {
   2854		ep_desc_id = 2;
   2855		func->function.ss_descriptors[(long)valuep] = desc;
   2856	} else if (func->function.hs_descriptors) {
   2857		ep_desc_id = 1;
   2858		func->function.hs_descriptors[(long)valuep] = desc;
   2859	} else {
   2860		ep_desc_id = 0;
   2861		func->function.fs_descriptors[(long)valuep]    = desc;
   2862	}
   2863
   2864	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
   2865		return 0;
   2866
   2867	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
   2868	if (idx < 0)
   2869		return idx;
   2870
   2871	ffs_ep = func->eps + idx;
   2872
   2873	if (ffs_ep->descs[ep_desc_id]) {
   2874		pr_err("two %sspeed descriptors for EP %d\n",
   2875			  speed_names[ep_desc_id],
   2876			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
   2877		return -EINVAL;
   2878	}
   2879	ffs_ep->descs[ep_desc_id] = ds;
   2880
   2881	ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
   2882	if (ffs_ep->ep) {
   2883		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
   2884		if (!ds->wMaxPacketSize)
   2885			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
   2886	} else {
   2887		struct usb_request *req;
   2888		struct usb_ep *ep;
   2889		u8 bEndpointAddress;
   2890		u16 wMaxPacketSize;
   2891
   2892		/*
   2893		 * We back up bEndpointAddress because autoconfig overwrites
   2894		 * it with physical endpoint address.
   2895		 */
   2896		bEndpointAddress = ds->bEndpointAddress;
   2897		/*
   2898		 * We back up wMaxPacketSize because autoconfig treats
   2899		 * endpoint descriptors as if they were full speed.
   2900		 */
   2901		wMaxPacketSize = ds->wMaxPacketSize;
   2902		pr_vdebug("autoconfig\n");
   2903		ep = usb_ep_autoconfig(func->gadget, ds);
   2904		if (!ep)
   2905			return -ENOTSUPP;
   2906		ep->driver_data = func->eps + idx;
   2907
   2908		req = usb_ep_alloc_request(ep, GFP_KERNEL);
   2909		if (!req)
   2910			return -ENOMEM;
   2911
   2912		ffs_ep->ep  = ep;
   2913		ffs_ep->req = req;
   2914		func->eps_revmap[ds->bEndpointAddress &
   2915				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
   2916		/*
   2917		 * If we use virtual address mapping, we restore
   2918		 * original bEndpointAddress value.
   2919		 */
   2920		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
   2921			ds->bEndpointAddress = bEndpointAddress;
   2922		/*
   2923		 * Restore wMaxPacketSize which was potentially
   2924		 * overwritten by autoconfig.
   2925		 */
   2926		ds->wMaxPacketSize = wMaxPacketSize;
   2927	}
   2928	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
   2929
   2930	return 0;
   2931}
   2932
   2933static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
   2934				   struct usb_descriptor_header *desc,
   2935				   void *priv)
   2936{
   2937	struct ffs_function *func = priv;
   2938	unsigned idx;
   2939	u8 newValue;
   2940
   2941	switch (type) {
   2942	default:
   2943	case FFS_DESCRIPTOR:
   2944		/* Handled in previous pass by __ffs_func_bind_do_descs() */
   2945		return 0;
   2946
   2947	case FFS_INTERFACE:
   2948		idx = *valuep;
   2949		if (func->interfaces_nums[idx] < 0) {
   2950			int id = usb_interface_id(func->conf, &func->function);
   2951			if (id < 0)
   2952				return id;
   2953			func->interfaces_nums[idx] = id;
   2954		}
   2955		newValue = func->interfaces_nums[idx];
   2956		break;
   2957
   2958	case FFS_STRING:
   2959		/* String IDs are allocated when ffs_data is bound to cdev */
   2960		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
   2961		break;
   2962
   2963	case FFS_ENDPOINT:
   2964		/*
   2965		 * USB_DT_ENDPOINT are handled in
   2966		 * __ffs_func_bind_do_descs().
   2967		 */
   2968		if (desc->bDescriptorType == USB_DT_ENDPOINT)
   2969			return 0;
   2970
   2971		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
   2972		if (!func->eps[idx].ep)
   2973			return -EINVAL;
   2974
   2975		{
   2976			struct usb_endpoint_descriptor **descs;
   2977			descs = func->eps[idx].descs;
   2978			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
   2979		}
   2980		break;
   2981	}
   2982
   2983	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
   2984	*valuep = newValue;
   2985	return 0;
   2986}
   2987
   2988static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
   2989				      struct usb_os_desc_header *h, void *data,
   2990				      unsigned len, void *priv)
   2991{
   2992	struct ffs_function *func = priv;
   2993	u8 length = 0;
   2994
   2995	switch (type) {
   2996	case FFS_OS_DESC_EXT_COMPAT: {
   2997		struct usb_ext_compat_desc *desc = data;
   2998		struct usb_os_desc_table *t;
   2999
   3000		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
   3001		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
   3002		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
   3003		       ARRAY_SIZE(desc->CompatibleID) +
   3004		       ARRAY_SIZE(desc->SubCompatibleID));
   3005		length = sizeof(*desc);
   3006	}
   3007		break;
   3008	case FFS_OS_DESC_EXT_PROP: {
   3009		struct usb_ext_prop_desc *desc = data;
   3010		struct usb_os_desc_table *t;
   3011		struct usb_os_desc_ext_prop *ext_prop;
   3012		char *ext_prop_name;
   3013		char *ext_prop_data;
   3014
   3015		t = &func->function.os_desc_table[h->interface];
   3016		t->if_id = func->interfaces_nums[h->interface];
   3017
   3018		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
   3019		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
   3020
   3021		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
   3022		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
   3023		ext_prop->data_len = le32_to_cpu(*(__le32 *)
   3024			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
   3025		length = ext_prop->name_len + ext_prop->data_len + 14;
   3026
   3027		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
   3028		func->ffs->ms_os_descs_ext_prop_name_avail +=
   3029			ext_prop->name_len;
   3030
   3031		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
   3032		func->ffs->ms_os_descs_ext_prop_data_avail +=
   3033			ext_prop->data_len;
   3034		memcpy(ext_prop_data,
   3035		       usb_ext_prop_data_ptr(data, ext_prop->name_len),
   3036		       ext_prop->data_len);
   3037		/* unicode data reported to the host as "WCHAR"s */
   3038		switch (ext_prop->type) {
   3039		case USB_EXT_PROP_UNICODE:
   3040		case USB_EXT_PROP_UNICODE_ENV:
   3041		case USB_EXT_PROP_UNICODE_LINK:
   3042		case USB_EXT_PROP_UNICODE_MULTI:
   3043			ext_prop->data_len *= 2;
   3044			break;
   3045		}
   3046		ext_prop->data = ext_prop_data;
   3047
   3048		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
   3049		       ext_prop->name_len);
   3050		/* property name reported to the host as "WCHAR"s */
   3051		ext_prop->name_len *= 2;
   3052		ext_prop->name = ext_prop_name;
   3053
   3054		t->os_desc->ext_prop_len +=
   3055			ext_prop->name_len + ext_prop->data_len + 14;
   3056		++t->os_desc->ext_prop_count;
   3057		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
   3058	}
   3059		break;
   3060	default:
   3061		pr_vdebug("unknown descriptor: %d\n", type);
   3062	}
   3063
   3064	return length;
   3065}
   3066
   3067static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
   3068						struct usb_configuration *c)
   3069{
   3070	struct ffs_function *func = ffs_func_from_usb(f);
   3071	struct f_fs_opts *ffs_opts =
   3072		container_of(f->fi, struct f_fs_opts, func_inst);
   3073	struct ffs_data *ffs_data;
   3074	int ret;
   3075
   3076	ENTER();
   3077
   3078	/*
   3079	 * Legacy gadget triggers binding in functionfs_ready_callback,
   3080	 * which already uses locking; taking the same lock here would
   3081	 * cause a deadlock.
   3082	 *
   3083	 * Configfs-enabled gadgets however do need ffs_dev_lock.
   3084	 */
   3085	if (!ffs_opts->no_configfs)
   3086		ffs_dev_lock();
   3087	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
   3088	ffs_data = ffs_opts->dev->ffs_data;
   3089	if (!ffs_opts->no_configfs)
   3090		ffs_dev_unlock();
   3091	if (ret)
   3092		return ERR_PTR(ret);
   3093
   3094	func->ffs = ffs_data;
   3095	func->conf = c;
   3096	func->gadget = c->cdev->gadget;
   3097
   3098	/*
   3099	 * In drivers/usb/gadget/configfs.c:configfs_composite_bind()
   3100	 * configurations are bound in sequence with list_for_each_entry(),
   3101	 * and within each configuration its functions are bound in sequence
   3102	 * as well, so we assume there is no race condition with regard to
   3103	 * ffs_opts->bound access.
   3104	 */
   3105	if (!ffs_opts->refcnt) {
   3106		ret = functionfs_bind(func->ffs, c->cdev);
   3107		if (ret)
   3108			return ERR_PTR(ret);
   3109	}
   3110	ffs_opts->refcnt++;
   3111	func->function.strings = func->ffs->stringtabs;
   3112
   3113	return ffs_opts;
   3114}
   3115
   3116static int _ffs_func_bind(struct usb_configuration *c,
   3117			  struct usb_function *f)
   3118{
   3119	struct ffs_function *func = ffs_func_from_usb(f);
   3120	struct ffs_data *ffs = func->ffs;
   3121
   3122	const int full = !!func->ffs->fs_descs_count;
   3123	const int high = !!func->ffs->hs_descs_count;
   3124	const int super = !!func->ffs->ss_descs_count;
   3125
   3126	int fs_len, hs_len, ss_len, ret, i;
   3127	struct ffs_ep *eps_ptr;
   3128
   3129	/* Make it a single chunk, less management later on */
   3130	vla_group(d);
   3131	vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
   3132	vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
   3133		full ? ffs->fs_descs_count + 1 : 0);
   3134	vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
   3135		high ? ffs->hs_descs_count + 1 : 0);
   3136	vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
   3137		super ? ffs->ss_descs_count + 1 : 0);
   3138	vla_item_with_sz(d, short, inums, ffs->interfaces_count);
   3139	vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
   3140			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
   3141	vla_item_with_sz(d, char[16], ext_compat,
   3142			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
   3143	vla_item_with_sz(d, struct usb_os_desc, os_desc,
   3144			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
   3145	vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
   3146			 ffs->ms_os_descs_ext_prop_count);
   3147	vla_item_with_sz(d, char, ext_prop_name,
   3148			 ffs->ms_os_descs_ext_prop_name_len);
   3149	vla_item_with_sz(d, char, ext_prop_data,
   3150			 ffs->ms_os_descs_ext_prop_data_len);
   3151	vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
   3152	char *vlabuf;
   3153
   3154	ENTER();
   3155
   3156	/* Has descriptors only for speeds gadget does not support */
   3157	if (!(full | high | super))
   3158		return -ENOTSUPP;
   3159
   3160	/* Allocate a single chunk, less management later on */
   3161	vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
   3162	if (!vlabuf)
   3163		return -ENOMEM;
   3164
   3165	ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
   3166	ffs->ms_os_descs_ext_prop_name_avail =
   3167		vla_ptr(vlabuf, d, ext_prop_name);
   3168	ffs->ms_os_descs_ext_prop_data_avail =
   3169		vla_ptr(vlabuf, d, ext_prop_data);
   3170
   3171	/* Copy descriptors  */
   3172	memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
   3173	       ffs->raw_descs_length);
   3174
   3175	memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
   3176	eps_ptr = vla_ptr(vlabuf, d, eps);
   3177	for (i = 0; i < ffs->eps_count; i++)
   3178		eps_ptr[i].num = -1;
   3179
   3180	/* Save pointers
   3181	 * d_eps == vlabuf, func->eps used to kfree vlabuf later
   3182	 */
   3183	func->eps             = vla_ptr(vlabuf, d, eps);
   3184	func->interfaces_nums = vla_ptr(vlabuf, d, inums);
   3185
   3186	/*
   3187	 * Go through all the endpoint descriptors and allocate endpoints
   3188	 * first, so that later we can rewrite the endpoint numbers without
   3189	 * worrying that an endpoint may still be described later on.
   3190	 */
   3191	if (full) {
   3192		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
   3193		fs_len = ffs_do_descs(ffs->fs_descs_count,
   3194				      vla_ptr(vlabuf, d, raw_descs),
   3195				      d_raw_descs__sz,
   3196				      __ffs_func_bind_do_descs, func);
   3197		if (fs_len < 0) {
   3198			ret = fs_len;
   3199			goto error;
   3200		}
   3201	} else {
   3202		fs_len = 0;
   3203	}
   3204
   3205	if (high) {
   3206		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
   3207		hs_len = ffs_do_descs(ffs->hs_descs_count,
   3208				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
   3209				      d_raw_descs__sz - fs_len,
   3210				      __ffs_func_bind_do_descs, func);
   3211		if (hs_len < 0) {
   3212			ret = hs_len;
   3213			goto error;
   3214		}
   3215	} else {
   3216		hs_len = 0;
   3217	}
   3218
   3219	if (super) {
   3220		func->function.ss_descriptors = func->function.ssp_descriptors =
   3221			vla_ptr(vlabuf, d, ss_descs);
   3222		ss_len = ffs_do_descs(ffs->ss_descs_count,
   3223				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
   3224				d_raw_descs__sz - fs_len - hs_len,
   3225				__ffs_func_bind_do_descs, func);
   3226		if (ss_len < 0) {
   3227			ret = ss_len;
   3228			goto error;
   3229		}
   3230	} else {
   3231		ss_len = 0;
   3232	}
   3233
   3234	/*
   3235	 * Now handle interface numbers allocation and interface and
   3236	 * endpoint numbers rewriting.  We can do that in one go
   3237	 * now.
   3238	 */
   3239	ret = ffs_do_descs(ffs->fs_descs_count +
   3240			   (high ? ffs->hs_descs_count : 0) +
   3241			   (super ? ffs->ss_descs_count : 0),
   3242			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
   3243			   __ffs_func_bind_do_nums, func);
   3244	if (ret < 0)
   3245		goto error;
   3246
   3247	func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
   3248	if (c->cdev->use_os_string) {
   3249		for (i = 0; i < ffs->interfaces_count; ++i) {
   3250			struct usb_os_desc *desc;
   3251
   3252			desc = func->function.os_desc_table[i].os_desc =
   3253				vla_ptr(vlabuf, d, os_desc) +
   3254				i * sizeof(struct usb_os_desc);
   3255			desc->ext_compat_id =
   3256				vla_ptr(vlabuf, d, ext_compat) + i * 16;
   3257			INIT_LIST_HEAD(&desc->ext_prop);
   3258		}
   3259		ret = ffs_do_os_descs(ffs->ms_os_descs_count,
   3260				      vla_ptr(vlabuf, d, raw_descs) +
   3261				      fs_len + hs_len + ss_len,
   3262				      d_raw_descs__sz - fs_len - hs_len -
   3263				      ss_len,
   3264				      __ffs_func_bind_do_os_desc, func);
   3265		if (ret < 0)
   3266			goto error;
   3267	}
   3268	func->function.os_desc_n =
   3269		c->cdev->use_os_string ? ffs->interfaces_count : 0;
   3270
   3271	/* And we're done */
   3272	ffs_event_add(ffs, FUNCTIONFS_BIND);
   3273	return 0;
   3274
   3275error:
   3276	/* XXX Do we need to release all claimed endpoints here? */
   3277	return ret;
   3278}
   3279
   3280static int ffs_func_bind(struct usb_configuration *c,
   3281			 struct usb_function *f)
   3282{
   3283	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
   3284	struct ffs_function *func = ffs_func_from_usb(f);
   3285	int ret;
   3286
   3287	if (IS_ERR(ffs_opts))
   3288		return PTR_ERR(ffs_opts);
   3289
   3290	ret = _ffs_func_bind(c, f);
   3291	if (ret && !--ffs_opts->refcnt)
   3292		functionfs_unbind(func->ffs);
   3293
   3294	return ret;
   3295}
   3296
   3297
   3298/* Other USB function hooks *************************************************/
   3299
   3300static void ffs_reset_work(struct work_struct *work)
   3301{
   3302	struct ffs_data *ffs = container_of(work,
   3303		struct ffs_data, reset_work);
   3304	ffs_data_reset(ffs);
   3305}
   3306
   3307static int ffs_func_set_alt(struct usb_function *f,
   3308			    unsigned interface, unsigned alt)
   3309{
   3310	struct ffs_function *func = ffs_func_from_usb(f);
   3311	struct ffs_data *ffs = func->ffs;
   3312	int ret = 0, intf;
   3313
   3314	if (alt != (unsigned)-1) {
   3315		intf = ffs_func_revmap_intf(func, interface);
   3316		if (intf < 0)
   3317			return intf;
   3318	}
   3319
   3320	if (ffs->func)
   3321		ffs_func_eps_disable(ffs->func);
   3322
   3323	if (ffs->state == FFS_DEACTIVATED) {
   3324		ffs->state = FFS_CLOSING;
   3325		INIT_WORK(&ffs->reset_work, ffs_reset_work);
   3326		schedule_work(&ffs->reset_work);
   3327		return -ENODEV;
   3328	}
   3329
   3330	if (ffs->state != FFS_ACTIVE)
   3331		return -ENODEV;
   3332
   3333	if (alt == (unsigned)-1) {
   3334		ffs->func = NULL;
   3335		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
   3336		return 0;
   3337	}
   3338
   3339	ffs->func = func;
   3340	ret = ffs_func_eps_enable(func);
   3341	if (ret >= 0)
   3342		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
   3343	return ret;
   3344}
   3345
   3346static void ffs_func_disable(struct usb_function *f)
   3347{
   3348	ffs_func_set_alt(f, 0, (unsigned)-1);
   3349}
   3350
   3351static int ffs_func_setup(struct usb_function *f,
   3352			  const struct usb_ctrlrequest *creq)
   3353{
   3354	struct ffs_function *func = ffs_func_from_usb(f);
   3355	struct ffs_data *ffs = func->ffs;
   3356	unsigned long flags;
   3357	int ret;
   3358
   3359	ENTER();
   3360
   3361	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
   3362	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
   3363	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
   3364	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
   3365	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
   3366
   3367	/*
   3368	 * Most requests directed at an interface go through here
   3369	 * (notable exceptions are set/get interface), so we need to
   3370	 * handle them.  All others are either handled by composite or
   3371	 * passed to usb_configuration->setup() (if one is set).  No
   3372	 * matter, we will handle requests directed at an endpoint here
   3373	 * as well (as it's straightforward).  Other request recipient
   3374	 * types are only handled when the user flag FUNCTIONFS_ALL_CTRL_RECIP
   3375	 * is being used.
   3376	 */
   3377	if (ffs->state != FFS_ACTIVE)
   3378		return -ENODEV;
   3379
   3380	switch (creq->bRequestType & USB_RECIP_MASK) {
   3381	case USB_RECIP_INTERFACE:
   3382		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
   3383		if (ret < 0)
   3384			return ret;
   3385		break;
   3386
   3387	case USB_RECIP_ENDPOINT:
   3388		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
   3389		if (ret < 0)
   3390			return ret;
   3391		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
   3392			ret = func->ffs->eps_addrmap[ret];
   3393		break;
   3394
   3395	default:
   3396		if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
   3397			ret = le16_to_cpu(creq->wIndex);
   3398		else
   3399			return -EOPNOTSUPP;
   3400	}
   3401
   3402	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
   3403	ffs->ev.setup = *creq;
   3404	ffs->ev.setup.wIndex = cpu_to_le16(ret);
   3405	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
   3406	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
   3407
   3408	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
   3409}
   3410
   3411static bool ffs_func_req_match(struct usb_function *f,
   3412			       const struct usb_ctrlrequest *creq,
   3413			       bool config0)
   3414{
   3415	struct ffs_function *func = ffs_func_from_usb(f);
   3416
   3417	if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
   3418		return false;
   3419
   3420	switch (creq->bRequestType & USB_RECIP_MASK) {
   3421	case USB_RECIP_INTERFACE:
   3422		return (ffs_func_revmap_intf(func,
   3423					     le16_to_cpu(creq->wIndex)) >= 0);
   3424	case USB_RECIP_ENDPOINT:
   3425		return (ffs_func_revmap_ep(func,
   3426					   le16_to_cpu(creq->wIndex)) >= 0);
   3427	default:
   3428		return (bool) (func->ffs->user_flags &
   3429			       FUNCTIONFS_ALL_CTRL_RECIP);
   3430	}
   3431}
   3432
   3433static void ffs_func_suspend(struct usb_function *f)
   3434{
   3435	ENTER();
   3436	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
   3437}
   3438
   3439static void ffs_func_resume(struct usb_function *f)
   3440{
   3441	ENTER();
   3442	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
   3443}
   3444
   3445
   3446/* Endpoint and interface numbers reverse mapping ***************************/
   3447
   3448static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
   3449{
   3450	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
   3451	return num ? num : -EDOM;
   3452}
   3453
   3454static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
   3455{
   3456	short *nums = func->interfaces_nums;
   3457	unsigned count = func->ffs->interfaces_count;
   3458
   3459	for (; count; --count, ++nums) {
   3460		if (*nums >= 0 && *nums == intf)
   3461			return nums - func->interfaces_nums;
   3462	}
   3463
   3464	return -EDOM;
   3465}
   3466
   3467
   3468/* Devices management *******************************************************/
   3469
   3470static LIST_HEAD(ffs_devices);
   3471
   3472static struct ffs_dev *_ffs_do_find_dev(const char *name)
   3473{
   3474	struct ffs_dev *dev;
   3475
   3476	if (!name)
   3477		return NULL;
   3478
   3479	list_for_each_entry(dev, &ffs_devices, entry) {
   3480		if (strcmp(dev->name, name) == 0)
   3481			return dev;
   3482	}
   3483
   3484	return NULL;
   3485}
   3486
   3487/*
   3488 * ffs_lock must be taken by the caller of this function
   3489 */
   3490static struct ffs_dev *_ffs_get_single_dev(void)
   3491{
   3492	struct ffs_dev *dev;
   3493
   3494	if (list_is_singular(&ffs_devices)) {
   3495		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
   3496		if (dev->single)
   3497			return dev;
   3498	}
   3499
   3500	return NULL;
   3501}
   3502
   3503/*
   3504 * ffs_lock must be taken by the caller of this function
   3505 */
   3506static struct ffs_dev *_ffs_find_dev(const char *name)
   3507{
   3508	struct ffs_dev *dev;
   3509
   3510	dev = _ffs_get_single_dev();
   3511	if (dev)
   3512		return dev;
   3513
   3514	return _ffs_do_find_dev(name);
   3515}
   3516
   3517/* Configfs support *********************************************************/
   3518
   3519static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
   3520{
   3521	return container_of(to_config_group(item), struct f_fs_opts,
   3522			    func_inst.group);
   3523}
   3524
   3525static void ffs_attr_release(struct config_item *item)
   3526{
   3527	struct f_fs_opts *opts = to_ffs_opts(item);
   3528
   3529	usb_put_function_instance(&opts->func_inst);
   3530}
   3531
   3532static struct configfs_item_operations ffs_item_ops = {
   3533	.release	= ffs_attr_release,
   3534};
   3535
   3536static const struct config_item_type ffs_func_type = {
   3537	.ct_item_ops	= &ffs_item_ops,
   3538	.ct_owner	= THIS_MODULE,
   3539};
   3540
   3541
   3542/* Function registration interface ******************************************/
   3543
   3544static void ffs_free_inst(struct usb_function_instance *f)
   3545{
   3546	struct f_fs_opts *opts;
   3547
   3548	opts = to_f_fs_opts(f);
   3549	ffs_release_dev(opts->dev);
   3550	ffs_dev_lock();
   3551	_ffs_free_dev(opts->dev);
   3552	ffs_dev_unlock();
   3553	kfree(opts);
   3554}
   3555
   3556static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
   3557{
   3558	if (strlen(name) >= sizeof_field(struct ffs_dev, name))
   3559		return -ENAMETOOLONG;
   3560	return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
   3561}
   3562
   3563static struct usb_function_instance *ffs_alloc_inst(void)
   3564{
   3565	struct f_fs_opts *opts;
   3566	struct ffs_dev *dev;
   3567
   3568	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
   3569	if (!opts)
   3570		return ERR_PTR(-ENOMEM);
   3571
   3572	opts->func_inst.set_inst_name = ffs_set_inst_name;
   3573	opts->func_inst.free_func_inst = ffs_free_inst;
   3574	ffs_dev_lock();
   3575	dev = _ffs_alloc_dev();
   3576	ffs_dev_unlock();
   3577	if (IS_ERR(dev)) {
   3578		kfree(opts);
   3579		return ERR_CAST(dev);
   3580	}
   3581	opts->dev = dev;
   3582	dev->opts = opts;
   3583
   3584	config_group_init_type_name(&opts->func_inst.group, "",
   3585				    &ffs_func_type);
   3586	return &opts->func_inst;
   3587}
   3588
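/*
 * With configfs the usual flow for wiring a FunctionFS instance into a
 * gadget looks roughly like this (shell sketch; "g1", "c.1" and "my_dev"
 * are arbitrary example names):
 *
 *	$ cd /sys/kernel/config/usb_gadget/g1
 *	$ mkdir functions/ffs.my_dev		# ffs_alloc_inst() +
 *						# ffs_set_inst_name("my_dev")
 *	$ ln -s functions/ffs.my_dev configs/c.1/
 *	$ mkdir -p /dev/ffs-my_dev
 *	$ mount -t functionfs my_dev /dev/ffs-my_dev
 *
 * The mount source must match the instance name so that ffs_acquire_dev()
 * can find the ffs_dev created above; the daemon then writes descriptors
 * and strings to ep0 before the gadget is attached to a UDC.
 */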
   3589static void ffs_free(struct usb_function *f)
   3590{
   3591	kfree(ffs_func_from_usb(f));
   3592}
   3593
   3594static void ffs_func_unbind(struct usb_configuration *c,
   3595			    struct usb_function *f)
   3596{
   3597	struct ffs_function *func = ffs_func_from_usb(f);
   3598	struct ffs_data *ffs = func->ffs;
   3599	struct f_fs_opts *opts =
   3600		container_of(f->fi, struct f_fs_opts, func_inst);
   3601	struct ffs_ep *ep = func->eps;
   3602	unsigned count = ffs->eps_count;
   3603	unsigned long flags;
   3604
   3605	ENTER();
   3606	if (ffs->func == func) {
   3607		ffs_func_eps_disable(func);
   3608		ffs->func = NULL;
   3609	}
   3610
   3611	/* Drain any pending AIO completions */
   3612	drain_workqueue(ffs->io_completion_wq);
   3613
   3614	if (!--opts->refcnt)
   3615		functionfs_unbind(ffs);
   3616
   3617	/* cleanup after autoconfig */
   3618	spin_lock_irqsave(&func->ffs->eps_lock, flags);
   3619	while (count--) {
   3620		if (ep->ep && ep->req)
   3621			usb_ep_free_request(ep->ep, ep->req);
   3622		ep->req = NULL;
   3623		++ep;
   3624	}
   3625	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
   3626	kfree(func->eps);
   3627	func->eps = NULL;
   3628	/*
   3629	 * eps, descriptors and interfaces_nums are allocated in the
   3630	 * same chunk so only one free is required.
   3631	 */
   3632	func->function.fs_descriptors = NULL;
   3633	func->function.hs_descriptors = NULL;
   3634	func->function.ss_descriptors = NULL;
   3635	func->function.ssp_descriptors = NULL;
   3636	func->interfaces_nums = NULL;
   3637
   3638	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
   3639}
   3640
   3641static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
   3642{
   3643	struct ffs_function *func;
   3644
   3645	ENTER();
   3646
   3647	func = kzalloc(sizeof(*func), GFP_KERNEL);
   3648	if (!func)
   3649		return ERR_PTR(-ENOMEM);
   3650
   3651	func->function.name    = "Function FS Gadget";
   3652
   3653	func->function.bind    = ffs_func_bind;
   3654	func->function.unbind  = ffs_func_unbind;
   3655	func->function.set_alt = ffs_func_set_alt;
   3656	func->function.disable = ffs_func_disable;
   3657	func->function.setup   = ffs_func_setup;
   3658	func->function.req_match = ffs_func_req_match;
   3659	func->function.suspend = ffs_func_suspend;
   3660	func->function.resume  = ffs_func_resume;
   3661	func->function.free_func = ffs_free;
   3662
   3663	return &func->function;
   3664}
   3665
   3666/*
   3667 * ffs_lock must be taken by the caller of this function
   3668 */
   3669static struct ffs_dev *_ffs_alloc_dev(void)
   3670{
   3671	struct ffs_dev *dev;
   3672	int ret;
   3673
   3674	if (_ffs_get_single_dev())
    3675		return ERR_PTR(-EBUSY);
   3676
   3677	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
   3678	if (!dev)
   3679		return ERR_PTR(-ENOMEM);
   3680
   3681	if (list_empty(&ffs_devices)) {
   3682		ret = functionfs_init();
   3683		if (ret) {
   3684			kfree(dev);
   3685			return ERR_PTR(ret);
   3686		}
   3687	}
   3688
   3689	list_add(&dev->entry, &ffs_devices);
   3690
   3691	return dev;
   3692}
   3693
   3694int ffs_name_dev(struct ffs_dev *dev, const char *name)
   3695{
   3696	struct ffs_dev *existing;
   3697	int ret = 0;
   3698
   3699	ffs_dev_lock();
   3700
   3701	existing = _ffs_do_find_dev(name);
   3702	if (!existing)
   3703		strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
   3704	else if (existing != dev)
   3705		ret = -EBUSY;
   3706
   3707	ffs_dev_unlock();
   3708
   3709	return ret;
   3710}
   3711EXPORT_SYMBOL_GPL(ffs_name_dev);
   3712
   3713int ffs_single_dev(struct ffs_dev *dev)
   3714{
   3715	int ret;
   3716
   3717	ret = 0;
   3718	ffs_dev_lock();
   3719
   3720	if (!list_is_singular(&ffs_devices))
   3721		ret = -EBUSY;
   3722	else
   3723		dev->single = true;
   3724
   3725	ffs_dev_unlock();
   3726	return ret;
   3727}
   3728EXPORT_SYMBOL_GPL(ffs_single_dev);
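
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * legacy, non-configfs gadget driver such as g_ffs typically names the
 * ffs_dev behind a function instance using the two exported helpers above.
 * my_ready_cb below is a hypothetical placeholder callback.
 *
 *	struct usb_function_instance *fi;
 *	struct f_fs_opts *opts;
 *	int ret;
 *
 *	fi = usb_get_function_instance("ffs");
 *	if (IS_ERR(fi))
 *		return PTR_ERR(fi);
 *	opts = to_f_fs_opts(fi);
 *	ret = ffs_name_dev(opts->dev, "my_func");  // or ffs_single_dev(opts->dev)
 *	if (ret) {
 *		usb_put_function_instance(fi);
 *		return ret;
 *	}
 *	opts->dev->ffs_ready_callback = my_ready_cb;
 *	opts->dev->no_configfs = true;
 */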
   3729
   3730/*
   3731 * ffs_lock must be taken by the caller of this function
   3732 */
   3733static void _ffs_free_dev(struct ffs_dev *dev)
   3734{
   3735	list_del(&dev->entry);
   3736
   3737	kfree(dev);
   3738	if (list_empty(&ffs_devices))
   3739		functionfs_cleanup();
   3740}
   3741
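/*
 * Claim the ffs_dev registered under @dev_name and link it with @ffs_data
 * (the mount path).  Returns -ENOENT if no such device exists (or its
 * acquire callback refuses) and -EBUSY if it is already mounted.
 */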
   3742static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
   3743{
   3744	int ret = 0;
   3745	struct ffs_dev *ffs_dev;
   3746
   3747	ENTER();
   3748	ffs_dev_lock();
   3749
   3750	ffs_dev = _ffs_find_dev(dev_name);
   3751	if (!ffs_dev) {
   3752		ret = -ENOENT;
   3753	} else if (ffs_dev->mounted) {
   3754		ret = -EBUSY;
   3755	} else if (ffs_dev->ffs_acquire_dev_callback &&
   3756		   ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
   3757		ret = -ENOENT;
   3758	} else {
   3759		ffs_dev->mounted = true;
   3760		ffs_dev->ffs_data = ffs_data;
   3761		ffs_data->private_data = ffs_dev;
   3762	}
   3763
   3764	ffs_dev_unlock();
   3765	return ret;
   3766}
   3767
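/*
 * Counterpart of ffs_acquire_dev(): unlink the ffs_data, mark the device
 * unmounted and run the optional release callback.
 */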
   3768static void ffs_release_dev(struct ffs_dev *ffs_dev)
   3769{
   3770	ENTER();
   3771	ffs_dev_lock();
   3772
   3773	if (ffs_dev && ffs_dev->mounted) {
   3774		ffs_dev->mounted = false;
   3775		if (ffs_dev->ffs_data) {
   3776			ffs_dev->ffs_data->private_data = NULL;
   3777			ffs_dev->ffs_data = NULL;
   3778		}
   3779
   3780		if (ffs_dev->ffs_release_dev_callback)
   3781			ffs_dev->ffs_release_dev_callback(ffs_dev);
   3782	}
   3783
   3784	ffs_dev_unlock();
   3785}
   3786
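/*
 * Called once userspace has written the descriptors and strings to ep0;
 * marks the device's descriptors ready and runs the optional ready callback.
 */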
   3787static int ffs_ready(struct ffs_data *ffs)
   3788{
   3789	struct ffs_dev *ffs_obj;
   3790	int ret = 0;
   3791
   3792	ENTER();
   3793	ffs_dev_lock();
   3794
   3795	ffs_obj = ffs->private_data;
   3796	if (!ffs_obj) {
   3797		ret = -EINVAL;
   3798		goto done;
   3799	}
   3800	if (WARN_ON(ffs_obj->desc_ready)) {
   3801		ret = -EBUSY;
   3802		goto done;
   3803	}
   3804
   3805	ffs_obj->desc_ready = true;
   3806
   3807	if (ffs_obj->ffs_ready_callback) {
   3808		ret = ffs_obj->ffs_ready_callback(ffs);
   3809		if (ret)
   3810			goto done;
   3811	}
   3812
   3813	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
   3814done:
   3815	ffs_dev_unlock();
   3816	return ret;
   3817}
   3818
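/*
 * Counterpart of ffs_ready(): runs the optional closed callback and, for a
 * configfs-created instance that is still bound, unregisters the gadget.
 */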
   3819static void ffs_closed(struct ffs_data *ffs)
   3820{
   3821	struct ffs_dev *ffs_obj;
   3822	struct f_fs_opts *opts;
   3823	struct config_item *ci;
   3824
   3825	ENTER();
   3826	ffs_dev_lock();
   3827
   3828	ffs_obj = ffs->private_data;
   3829	if (!ffs_obj)
   3830		goto done;
   3831
   3832	ffs_obj->desc_ready = false;
   3833
   3834	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
   3835	    ffs_obj->ffs_closed_callback)
   3836		ffs_obj->ffs_closed_callback(ffs);
   3837
   3838	if (ffs_obj->opts)
   3839		opts = ffs_obj->opts;
   3840	else
   3841		goto done;
   3842
   3843	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
   3844	    || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
   3845		goto done;
   3846
   3847	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
   3848	ffs_dev_unlock();
   3849
   3850	if (test_bit(FFS_FL_BOUND, &ffs->flags))
   3851		unregister_gadget_item(ci);
   3852	return;
   3853done:
   3854	ffs_dev_unlock();
   3855}
   3856
   3857/* Misc helper functions ****************************************************/
   3858
   3859static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
   3860{
   3861	return nonblock
   3862		? mutex_trylock(mutex) ? 0 : -EAGAIN
   3863		: mutex_lock_interruptible(mutex);
   3864}
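
/*
 * Editor's note (illustrative, not part of the original file): callers in
 * this file pass the file's O_NONBLOCK flag so that non-blocking I/O fails
 * fast with -EAGAIN instead of sleeping on the mutex:
 *
 *	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	mutex_unlock(&ffs->mutex);
 */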
   3865
   3866static char *ffs_prepare_buffer(const char __user *buf, size_t len)
   3867{
   3868	char *data;
   3869
   3870	if (!len)
   3871		return NULL;
   3872
   3873	data = memdup_user(buf, len);
   3874	if (IS_ERR(data))
   3875		return data;
   3876
   3877	pr_vdebug("Buffer from user space:\n");
   3878	ffs_dump_mem("", data, len);
   3879
   3880	return data;
   3881}
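
/*
 * Editor's note (illustrative, not part of the original file): the return
 * convention is asymmetric: a zero-length request yields NULL rather than
 * an error; otherwise the result is a kmalloc'ed copy or an ERR_PTR() from
 * memdup_user().  A typical caller therefore checks IS_ERR() and relies on
 * kfree(NULL) being a no-op:
 *
 *	data = ffs_prepare_buffer(buf, len);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...
 *	kfree(data);
 */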
   3882
   3883DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
   3884MODULE_LICENSE("GPL");
   3885MODULE_AUTHOR("Michal Nazarewicz");