cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

f_mass_storage.c (96975B)


      1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
      2/*
      3 * f_mass_storage.c -- Mass Storage USB Composite Function
      4 *
      5 * Copyright (C) 2003-2008 Alan Stern
      6 * Copyright (C) 2009 Samsung Electronics
      7 *                    Author: Michal Nazarewicz <mina86@mina86.com>
      8 * All rights reserved.
      9 */
     10
     11/*
     12 * The Mass Storage Function acts as a USB Mass Storage device,
     13 * appearing to the host as a disk drive or as a CD-ROM drive.  In
     14 * addition to providing an example of a genuinely useful composite
     15 * function for a USB device, it also illustrates a technique of
     16 * double-buffering for increased throughput.
     17 *
     18 * For more information about MSF and in particular its module
     19 * parameters and sysfs interface read the
     20 * <Documentation/usb/mass-storage.rst> file.
     21 */
     22
     23/*
     24 * MSF is configured by specifying a fsg_config structure.  It has the
     25 * following fields:
     26 *
     27 *	nluns		Number of LUNs the function has (anywhere
     28 *				from 1 to FSG_MAX_LUNS).
     29 *	luns		An array of LUN configuration values.  This
     30 *				should be filled for each LUN that the
     31 *				function will include (i.e. for "nluns"
     32 *				LUNs).  Each element of the array has
     33 *				the following fields:
     34 *	->filename	The path to the backing file for the LUN.
     35 *				Required if LUN is not marked as
     36 *				removable.
     37 *	->ro		Flag specifying access to the LUN shall be
     38 *				read-only.  This is implied if CD-ROM
     39 *				emulation is enabled as well as when
     40 *				it was impossible to open "filename"
     41 *				in R/W mode.
     42 *	->removable	Flag specifying that LUN shall be indicated as
     43 *				being removable.
     44 *	->cdrom		Flag specifying that LUN shall be reported as
     45 *				being a CD-ROM.
     46 *	->nofua		Flag specifying that FUA flag in SCSI WRITE(10,12)
     47 *				commands for this LUN shall be ignored.
     48 *
     49 *	vendor_name
     50 *	product_name
     51 *	release		Information used as a reply to INQUIRY
     52 *				request.  To use default set to NULL,
     53 *				NULL, 0xffff respectively.  The first
     54 *				field should be 8 and the second 16
     55 *				characters or less.
     56 *
     57 *	can_stall	Set to permit function to halt bulk endpoints.
     58 *				Disabled on some USB devices known not
     59 *				to work correctly.  You should set it
     60 *				to true.
     61 *
     62 * If "removable" is not set for a LUN then a backing file must be
     63 * specified.  If it is set, then NULL filename means the LUN's medium
     64 * is not loaded (an empty string as "filename" in the fsg_config
     65 * structure causes an error).  The CD-ROM emulation includes a single
     66 * data track and no audio tracks; hence there need be only one
     67 * backing file per LUN.
     68 *
     69 * This function is heavily based on "File-backed Storage Gadget" by
     70 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
     71 * Brownell.  The driver's SCSI command interface was based on the
     72 * "Information technology - Small Computer System Interface - 2"
     73 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
     74 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
     75 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
     76 * was based on the "Universal Serial Bus Mass Storage Class UFI
     77 * Command Specification" document, Revision 1.0, December 14, 1998,
     78 * available at
     79 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
     80 */
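Editorial aside: putting the fields described above together, a gadget might fill in an fsg_config for a single removable CD-ROM LUN roughly as sketched below. This is a hedged illustration only; it assumes the struct fsg_config / struct fsg_lun_config layout declared in f_mass_storage.h, and example_fill_config() and the backing-file path are hypothetical, not part of this file.

static void example_fill_config(struct fsg_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->nluns = 1;				/* one LUN, index 0 */
	cfg->luns[0].filename = "/root/cd.iso";	/* backing file (illustrative) */
	cfg->luns[0].removable = 1;		/* medium may be "ejected" */
	cfg->luns[0].cdrom = 1;			/* report as CD-ROM; implies ro */
	cfg->luns[0].nofua = 0;			/* honour FUA on writes */

	cfg->vendor_name = NULL;		/* NULL, NULL, 0xffff ... */
	cfg->product_name = NULL;		/* ... select the defaults */
	cfg->release = 0xffff;			/* for the INQUIRY reply */
	cfg->can_stall = 1;			/* "You should set it to true" */
}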
     81
     82/*
     83 *				Driver Design
     84 *
     85 * The MSF is fairly straightforward.  There is a main kernel
     86 * thread that handles most of the work.  Interrupt routines field
     87 * callbacks from the controller driver: bulk- and interrupt-request
     88 * completion notifications, endpoint-0 events, and disconnect events.
     89 * Completion events are passed to the main thread by wakeup calls.  Many
     90 * ep0 requests are handled at interrupt time, but SetInterface,
     91 * SetConfiguration, and device reset requests are forwarded to the
     92 * thread in the form of "exceptions" using SIGUSR1 signals (since they
     93 * should interrupt any ongoing file I/O operations).
     94 *
     95 * The thread's main routine implements the standard command/data/status
     96 * parts of a SCSI interaction.  It and its subroutines are full of tests
     97 * for pending signals/exceptions -- all this polling is necessary since
     98 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
     99 * indication that the driver really wants to be running in userspace.)
    100 * An important point is that so long as the thread is alive it keeps an
    101 * open reference to the backing file.  This will prevent unmounting
    102 * the backing file's underlying filesystem and could cause problems
    103 * during system shutdown, for example.  To prevent such problems, the
    104 * thread catches INT, TERM, and KILL signals and converts them into
    105 * an EXIT exception.
    106 *
    107 * In normal operation the main thread is started during the gadget's
    108 * fsg_bind() callback and stopped during fsg_unbind().  But it can
    109 * also exit when it receives a signal, and there's no point leaving
    110 * the gadget running when the thread is dead.  As of this moment, MSF
     111 * provides no way to deregister the gadget when the thread dies -- maybe
     112 * a callback function is needed.
    113 *
    114 * To provide maximum throughput, the driver uses a circular pipeline of
    115 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
    116 * arbitrarily long; in practice the benefits don't justify having more
    117 * than 2 stages (i.e., double buffering).  But it helps to think of the
    118 * pipeline as being a long one.  Each buffer head contains a bulk-in and
    119 * a bulk-out request pointer (since the buffer can be used for both
    120 * output and input -- directions always are given from the host's
    121 * point of view) as well as a pointer to the buffer and various state
    122 * variables.
    123 *
    124 * Use of the pipeline follows a simple protocol.  There is a variable
    125 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
    126 * At any time that buffer head may still be in use from an earlier
    127 * request, so each buffer head has a state variable indicating whether
    128 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
    129 * buffer head to be EMPTY, filling the buffer either by file I/O or by
    130 * USB I/O (during which the buffer head is BUSY), and marking the buffer
    131 * head FULL when the I/O is complete.  Then the buffer will be emptied
    132 * (again possibly by USB I/O, during which it is marked BUSY) and
    133 * finally marked EMPTY again (possibly by a completion routine).
    134 *
    135 * A module parameter tells the driver to avoid stalling the bulk
    136 * endpoints wherever the transport specification allows.  This is
    137 * necessary for some UDCs like the SuperH, which cannot reliably clear a
    138 * halt on a bulk endpoint.  However, under certain circumstances the
    139 * Bulk-only specification requires a stall.  In such cases the driver
    140 * will halt the endpoint and set a flag indicating that it should clear
    141 * the halt in software during the next device reset.  Hopefully this
    142 * will permit everything to work correctly.  Furthermore, although the
    143 * specification allows the bulk-out endpoint to halt when the host sends
    144 * too much data, implementing this would cause an unavoidable race.
    145 * The driver will always use the "no-stall" approach for OUT transfers.
    146 *
    147 * One subtle point concerns sending status-stage responses for ep0
    148 * requests.  Some of these requests, such as device reset, can involve
    149 * interrupting an ongoing file I/O operation, which might take an
    150 * arbitrarily long time.  During that delay the host might give up on
    151 * the original ep0 request and issue a new one.  When that happens the
    152 * driver should not notify the host about completion of the original
    153 * request, as the host will no longer be waiting for it.  So the driver
    154 * assigns to each ep0 request a unique tag, and it keeps track of the
    155 * tag value of the request associated with a long-running exception
    156 * (device-reset, interface-change, or configuration-change).  When the
    157 * exception handler is finished, the status-stage response is submitted
    158 * only if the current ep0 request tag is equal to the exception request
    159 * tag.  Thus only the most recently received ep0 request will get a
    160 * status-stage response.
    161 *
    162 * Warning: This driver source file is too long.  It ought to be split up
    163 * into a header file plus about 3 separate .c files, to handle the details
    164 * of the Gadget, USB Mass Storage, and SCSI protocols.
    165 */
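Editorial aside: the buffer-head pipeline described above boils down to the cycle sketched below (compare do_read() further down in this file). sleep_thread(), start_in_transfer() and the BUF_STATE_* values are the ones defined in this file; fill_from_file() is a hypothetical stand-in for the kernel_read() call that performs the actual file I/O.

	for (;;) {
		struct fsg_buffhd *bh = common->next_buffhd_to_fill;

		/* Wait until any earlier USB transfer has drained this head;
		 * the completion handler marks it EMPTY and wakes the thread. */
		if (sleep_thread(common, false, bh))
			break;

		/* Fill the buffer from the backing file; in pipeline terms
		 * the head is "busy" while this runs. */
		bh->inreq->length = fill_from_file(bh->buf);	/* hypothetical */
		bh->state = BUF_STATE_FULL;

		/* Queue the FULL buffer on the bulk-in endpoint and advance
		 * to the next head in the ring; the completion handler will
		 * mark it EMPTY again once the host has read it. */
		if (!start_in_transfer(common, bh))
			break;
		common->next_buffhd_to_fill = bh->next;
	}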
    166
    167
    168/* #define VERBOSE_DEBUG */
    169/* #define DUMP_MSGS */
    170
    171#include <linux/blkdev.h>
    172#include <linux/completion.h>
    173#include <linux/dcache.h>
    174#include <linux/delay.h>
    175#include <linux/device.h>
    176#include <linux/fcntl.h>
    177#include <linux/file.h>
    178#include <linux/fs.h>
    179#include <linux/kthread.h>
    180#include <linux/sched/signal.h>
    181#include <linux/limits.h>
    182#include <linux/pagemap.h>
    183#include <linux/rwsem.h>
    184#include <linux/slab.h>
    185#include <linux/spinlock.h>
    186#include <linux/string.h>
    187#include <linux/freezer.h>
    188#include <linux/module.h>
    189#include <linux/uaccess.h>
    190#include <asm/unaligned.h>
    191
    192#include <linux/usb/ch9.h>
    193#include <linux/usb/gadget.h>
    194#include <linux/usb/composite.h>
    195
    196#include <linux/nospec.h>
    197
    198#include "configfs.h"
    199
    200
    201/*------------------------------------------------------------------------*/
    202
    203#define FSG_DRIVER_DESC		"Mass Storage Function"
    204#define FSG_DRIVER_VERSION	"2009/09/11"
    205
    206static const char fsg_string_interface[] = "Mass Storage";
    207
    208#include "storage_common.h"
    209#include "f_mass_storage.h"
    210
    211/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
    212static struct usb_string		fsg_strings[] = {
    213	{FSG_STRING_INTERFACE,		fsg_string_interface},
    214	{}
    215};
    216
    217static struct usb_gadget_strings	fsg_stringtab = {
    218	.language	= 0x0409,		/* en-us */
    219	.strings	= fsg_strings,
    220};
    221
    222static struct usb_gadget_strings *fsg_strings_array[] = {
    223	&fsg_stringtab,
    224	NULL,
    225};
    226
    227/*-------------------------------------------------------------------------*/
    228
    229struct fsg_dev;
    230struct fsg_common;
    231
    232/* Data shared by all the FSG instances. */
    233struct fsg_common {
    234	struct usb_gadget	*gadget;
    235	struct usb_composite_dev *cdev;
    236	struct fsg_dev		*fsg;
    237	wait_queue_head_t	io_wait;
    238	wait_queue_head_t	fsg_wait;
    239
    240	/* filesem protects: backing files in use */
    241	struct rw_semaphore	filesem;
    242
    243	/* lock protects: state and thread_task */
    244	spinlock_t		lock;
    245
    246	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
    247	struct usb_request	*ep0req;	/* Copy of cdev->req */
    248	unsigned int		ep0_req_tag;
    249
    250	struct fsg_buffhd	*next_buffhd_to_fill;
    251	struct fsg_buffhd	*next_buffhd_to_drain;
    252	struct fsg_buffhd	*buffhds;
    253	unsigned int		fsg_num_buffers;
    254
    255	int			cmnd_size;
    256	u8			cmnd[MAX_COMMAND_SIZE];
    257
    258	unsigned int		lun;
    259	struct fsg_lun		*luns[FSG_MAX_LUNS];
    260	struct fsg_lun		*curlun;
    261
    262	unsigned int		bulk_out_maxpacket;
    263	enum fsg_state		state;		/* For exception handling */
    264	unsigned int		exception_req_tag;
    265	void			*exception_arg;
    266
    267	enum data_direction	data_dir;
    268	u32			data_size;
    269	u32			data_size_from_cmnd;
    270	u32			tag;
    271	u32			residue;
    272	u32			usb_amount_left;
    273
    274	unsigned int		can_stall:1;
    275	unsigned int		free_storage_on_release:1;
    276	unsigned int		phase_error:1;
    277	unsigned int		short_packet_received:1;
    278	unsigned int		bad_lun_okay:1;
    279	unsigned int		running:1;
    280	unsigned int		sysfs:1;
    281
    282	struct completion	thread_notifier;
    283	struct task_struct	*thread_task;
    284
    285	/* Gadget's private data. */
    286	void			*private_data;
    287
    288	char inquiry_string[INQUIRY_STRING_LEN];
    289};
    290
    291struct fsg_dev {
    292	struct usb_function	function;
    293	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
    294	struct fsg_common	*common;
    295
    296	u16			interface_number;
    297
    298	unsigned int		bulk_in_enabled:1;
    299	unsigned int		bulk_out_enabled:1;
    300
    301	unsigned long		atomic_bitflags;
    302#define IGNORE_BULK_OUT		0
    303
    304	struct usb_ep		*bulk_in;
    305	struct usb_ep		*bulk_out;
    306};
    307
    308static inline int __fsg_is_set(struct fsg_common *common,
    309			       const char *func, unsigned line)
    310{
    311	if (common->fsg)
    312		return 1;
    313	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
    314	WARN_ON(1);
    315	return 0;
    316}
    317
    318#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
    319
    320static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
    321{
    322	return container_of(f, struct fsg_dev, function);
    323}
    324
    325static int exception_in_progress(struct fsg_common *common)
    326{
    327	return common->state > FSG_STATE_NORMAL;
    328}
    329
    330/* Make bulk-out requests be divisible by the maxpacket size */
    331static void set_bulk_out_req_length(struct fsg_common *common,
    332				    struct fsg_buffhd *bh, unsigned int length)
    333{
    334	unsigned int	rem;
    335
    336	bh->bulk_out_intended_length = length;
    337	rem = length % common->bulk_out_maxpacket;
    338	if (rem > 0)
    339		length += common->bulk_out_maxpacket - rem;
    340	bh->outreq->length = length;
    341}
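Worked example (editorial): with bulk_out_maxpacket == 512, a requested length of 1000 is padded up to 1024 so the bulk-out request always ends on a packet boundary, while a length that is already a multiple of 512 is left unchanged. bulk_out_intended_length keeps the original 1000, which is what bulk_out_complete() later compares req->actual against.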
    342
    343
    344/*-------------------------------------------------------------------------*/
    345
    346static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
    347{
    348	const char	*name;
    349
    350	if (ep == fsg->bulk_in)
    351		name = "bulk-in";
    352	else if (ep == fsg->bulk_out)
    353		name = "bulk-out";
    354	else
    355		name = ep->name;
    356	DBG(fsg, "%s set halt\n", name);
    357	return usb_ep_set_halt(ep);
    358}
    359
    360
    361/*-------------------------------------------------------------------------*/
    362
    363/* These routines may be called in process context or in_irq */
    364
    365static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
    366			      void *arg)
    367{
    368	unsigned long		flags;
    369
    370	/*
    371	 * Do nothing if a higher-priority exception is already in progress.
    372	 * If a lower-or-equal priority exception is in progress, preempt it
    373	 * and notify the main thread by sending it a signal.
    374	 */
    375	spin_lock_irqsave(&common->lock, flags);
    376	if (common->state <= new_state) {
    377		common->exception_req_tag = common->ep0_req_tag;
    378		common->state = new_state;
    379		common->exception_arg = arg;
    380		if (common->thread_task)
    381			send_sig_info(SIGUSR1, SEND_SIG_PRIV,
    382				      common->thread_task);
    383	}
    384	spin_unlock_irqrestore(&common->lock, flags);
    385}
    386
    387static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
    388{
    389	__raise_exception(common, new_state, NULL);
    390}
    391
    392/*-------------------------------------------------------------------------*/
    393
    394static int ep0_queue(struct fsg_common *common)
    395{
    396	int	rc;
    397
    398	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
    399	common->ep0->driver_data = common;
    400	if (rc != 0 && rc != -ESHUTDOWN) {
    401		/* We can't do much more than wait for a reset */
    402		WARNING(common, "error in submission: %s --> %d\n",
    403			common->ep0->name, rc);
    404	}
    405	return rc;
    406}
    407
    408
    409/*-------------------------------------------------------------------------*/
    410
    411/* Completion handlers. These always run in_irq. */
    412
    413static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
    414{
    415	struct fsg_common	*common = ep->driver_data;
    416	struct fsg_buffhd	*bh = req->context;
    417
    418	if (req->status || req->actual != req->length)
    419		DBG(common, "%s --> %d, %u/%u\n", __func__,
    420		    req->status, req->actual, req->length);
    421	if (req->status == -ECONNRESET)		/* Request was cancelled */
    422		usb_ep_fifo_flush(ep);
    423
    424	/* Synchronize with the smp_load_acquire() in sleep_thread() */
    425	smp_store_release(&bh->state, BUF_STATE_EMPTY);
    426	wake_up(&common->io_wait);
    427}
    428
    429static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
    430{
    431	struct fsg_common	*common = ep->driver_data;
    432	struct fsg_buffhd	*bh = req->context;
    433
    434	dump_msg(common, "bulk-out", req->buf, req->actual);
    435	if (req->status || req->actual != bh->bulk_out_intended_length)
    436		DBG(common, "%s --> %d, %u/%u\n", __func__,
    437		    req->status, req->actual, bh->bulk_out_intended_length);
    438	if (req->status == -ECONNRESET)		/* Request was cancelled */
    439		usb_ep_fifo_flush(ep);
    440
    441	/* Synchronize with the smp_load_acquire() in sleep_thread() */
    442	smp_store_release(&bh->state, BUF_STATE_FULL);
    443	wake_up(&common->io_wait);
    444}
    445
    446static int _fsg_common_get_max_lun(struct fsg_common *common)
    447{
    448	int i = ARRAY_SIZE(common->luns) - 1;
    449
    450	while (i >= 0 && !common->luns[i])
    451		--i;
    452
    453	return i;
    454}
    455
    456static int fsg_setup(struct usb_function *f,
    457		     const struct usb_ctrlrequest *ctrl)
    458{
    459	struct fsg_dev		*fsg = fsg_from_func(f);
    460	struct usb_request	*req = fsg->common->ep0req;
    461	u16			w_index = le16_to_cpu(ctrl->wIndex);
    462	u16			w_value = le16_to_cpu(ctrl->wValue);
    463	u16			w_length = le16_to_cpu(ctrl->wLength);
    464
    465	if (!fsg_is_set(fsg->common))
    466		return -EOPNOTSUPP;
    467
    468	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
    469	req->context = NULL;
    470	req->length = 0;
    471	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
    472
    473	switch (ctrl->bRequest) {
    474
    475	case US_BULK_RESET_REQUEST:
    476		if (ctrl->bRequestType !=
    477		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
    478			break;
    479		if (w_index != fsg->interface_number || w_value != 0 ||
    480				w_length != 0)
    481			return -EDOM;
    482
    483		/*
    484		 * Raise an exception to stop the current operation
    485		 * and reinitialize our state.
    486		 */
    487		DBG(fsg, "bulk reset request\n");
    488		raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET);
    489		return USB_GADGET_DELAYED_STATUS;
    490
    491	case US_BULK_GET_MAX_LUN:
    492		if (ctrl->bRequestType !=
    493		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
    494			break;
    495		if (w_index != fsg->interface_number || w_value != 0 ||
    496				w_length != 1)
    497			return -EDOM;
    498		VDBG(fsg, "get max LUN\n");
    499		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
    500
    501		/* Respond with data/status */
    502		req->length = min((u16)1, w_length);
    503		return ep0_queue(fsg->common);
    504	}
    505
    506	VDBG(fsg,
    507	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
    508	     ctrl->bRequestType, ctrl->bRequest,
    509	     le16_to_cpu(ctrl->wValue), w_index, w_length);
    510	return -EOPNOTSUPP;
    511}
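Editorial aside: an illustrative trace of the two class-specific control requests handled above, assuming two configured LUNs (request values per the USB Mass Storage Bulk-Only Transport spec):

	GET MAX LUN (US_BULK_GET_MAX_LUN, 0xfe):
		bmRequestType 0xa1, wIndex = interface, wValue = 0, wLength = 1
		-> one data byte, 0x01 (highest LUN index), so the host
		   addresses LUNs 0 and 1

	Bulk-Only Mass Storage Reset (US_BULK_RESET_REQUEST, 0xff):
		bmRequestType 0x21, wValue = 0, wLength = 0, no data stage
		-> fsg_setup() raises FSG_STATE_PROTOCOL_RESET and defers the
		   status stage via USB_GADGET_DELAYED_STATUS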
    512
    513
    514/*-------------------------------------------------------------------------*/
    515
    516/* All the following routines run in process context */
    517
    518/* Use this for bulk or interrupt transfers, not ep0 */
    519static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
    520			   struct usb_request *req)
    521{
    522	int	rc;
    523
    524	if (ep == fsg->bulk_in)
    525		dump_msg(fsg, "bulk-in", req->buf, req->length);
    526
    527	rc = usb_ep_queue(ep, req, GFP_KERNEL);
    528	if (rc) {
    529
    530		/* We can't do much more than wait for a reset */
    531		req->status = rc;
    532
    533		/*
    534		 * Note: currently the net2280 driver fails zero-length
    535		 * submissions if DMA is enabled.
    536		 */
    537		if (rc != -ESHUTDOWN &&
    538				!(rc == -EOPNOTSUPP && req->length == 0))
    539			WARNING(fsg, "error in submission: %s --> %d\n",
    540					ep->name, rc);
    541	}
    542	return rc;
    543}
    544
    545static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
    546{
    547	if (!fsg_is_set(common))
    548		return false;
    549	bh->state = BUF_STATE_SENDING;
    550	if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
    551		bh->state = BUF_STATE_EMPTY;
    552	return true;
    553}
    554
    555static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
    556{
    557	if (!fsg_is_set(common))
    558		return false;
    559	bh->state = BUF_STATE_RECEIVING;
    560	if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
    561		bh->state = BUF_STATE_FULL;
    562	return true;
    563}
    564
    565static int sleep_thread(struct fsg_common *common, bool can_freeze,
    566		struct fsg_buffhd *bh)
    567{
    568	int	rc;
    569
    570	/* Wait until a signal arrives or bh is no longer busy */
    571	if (can_freeze)
    572		/*
    573		 * synchronize with the smp_store_release(&bh->state) in
    574		 * bulk_in_complete() or bulk_out_complete()
    575		 */
    576		rc = wait_event_freezable(common->io_wait,
    577				bh && smp_load_acquire(&bh->state) >=
    578					BUF_STATE_EMPTY);
    579	else
    580		rc = wait_event_interruptible(common->io_wait,
    581				bh && smp_load_acquire(&bh->state) >=
    582					BUF_STATE_EMPTY);
    583	return rc ? -EINTR : 0;
    584}
    585
    586
    587/*-------------------------------------------------------------------------*/
    588
    589static int do_read(struct fsg_common *common)
    590{
    591	struct fsg_lun		*curlun = common->curlun;
    592	u64			lba;
    593	struct fsg_buffhd	*bh;
    594	int			rc;
    595	u32			amount_left;
    596	loff_t			file_offset, file_offset_tmp;
    597	unsigned int		amount;
    598	ssize_t			nread;
    599
    600	/*
    601	 * Get the starting Logical Block Address and check that it's
    602	 * not too big.
    603	 */
    604	if (common->cmnd[0] == READ_6)
    605		lba = get_unaligned_be24(&common->cmnd[1]);
    606	else {
    607		if (common->cmnd[0] == READ_16)
    608			lba = get_unaligned_be64(&common->cmnd[2]);
    609		else		/* READ_10 or READ_12 */
    610			lba = get_unaligned_be32(&common->cmnd[2]);
    611
    612		/*
    613		 * We allow DPO (Disable Page Out = don't save data in the
    614		 * cache) and FUA (Force Unit Access = don't read from the
    615		 * cache), but we don't implement them.
    616		 */
    617		if ((common->cmnd[1] & ~0x18) != 0) {
    618			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
    619			return -EINVAL;
    620		}
    621	}
    622	if (lba >= curlun->num_sectors) {
    623		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    624		return -EINVAL;
    625	}
    626	file_offset = ((loff_t) lba) << curlun->blkbits;
    627
    628	/* Carry out the file reads */
    629	amount_left = common->data_size_from_cmnd;
    630	if (unlikely(amount_left == 0))
    631		return -EIO;		/* No default reply */
    632
    633	for (;;) {
    634		/*
    635		 * Figure out how much we need to read:
    636		 * Try to read the remaining amount.
    637		 * But don't read more than the buffer size.
    638		 * And don't try to read past the end of the file.
    639		 */
    640		amount = min(amount_left, FSG_BUFLEN);
    641		amount = min((loff_t)amount,
    642			     curlun->file_length - file_offset);
    643
    644		/* Wait for the next buffer to become available */
    645		bh = common->next_buffhd_to_fill;
    646		rc = sleep_thread(common, false, bh);
    647		if (rc)
    648			return rc;
    649
    650		/*
    651		 * If we were asked to read past the end of file,
    652		 * end with an empty buffer.
    653		 */
    654		if (amount == 0) {
    655			curlun->sense_data =
    656					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    657			curlun->sense_data_info =
    658					file_offset >> curlun->blkbits;
    659			curlun->info_valid = 1;
    660			bh->inreq->length = 0;
    661			bh->state = BUF_STATE_FULL;
    662			break;
    663		}
    664
    665		/* Perform the read */
    666		file_offset_tmp = file_offset;
    667		nread = kernel_read(curlun->filp, bh->buf, amount,
    668				&file_offset_tmp);
    669		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
    670		      (unsigned long long)file_offset, (int)nread);
    671		if (signal_pending(current))
    672			return -EINTR;
    673
    674		if (nread < 0) {
    675			LDBG(curlun, "error in file read: %d\n", (int)nread);
    676			nread = 0;
    677		} else if (nread < amount) {
    678			LDBG(curlun, "partial file read: %d/%u\n",
    679			     (int)nread, amount);
    680			nread = round_down(nread, curlun->blksize);
    681		}
    682		file_offset  += nread;
    683		amount_left  -= nread;
    684		common->residue -= nread;
    685
    686		/*
    687		 * Except at the end of the transfer, nread will be
    688		 * equal to the buffer size, which is divisible by the
    689		 * bulk-in maxpacket size.
    690		 */
    691		bh->inreq->length = nread;
    692		bh->state = BUF_STATE_FULL;
    693
    694		/* If an error occurred, report it and its position */
    695		if (nread < amount) {
    696			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
    697			curlun->sense_data_info =
    698					file_offset >> curlun->blkbits;
    699			curlun->info_valid = 1;
    700			break;
    701		}
    702
    703		if (amount_left == 0)
    704			break;		/* No more left to read */
    705
    706		/* Send this buffer and go read some more */
    707		bh->inreq->zero = 0;
    708		if (!start_in_transfer(common, bh))
    709			/* Don't know what to do if common->fsg is NULL */
    710			return -EIO;
    711		common->next_buffhd_to_fill = bh->next;
    712	}
    713
    714	return -EIO;		/* No default reply */
    715}
    716
    717
    718/*-------------------------------------------------------------------------*/
    719
    720static int do_write(struct fsg_common *common)
    721{
    722	struct fsg_lun		*curlun = common->curlun;
    723	u64			lba;
    724	struct fsg_buffhd	*bh;
    725	int			get_some_more;
    726	u32			amount_left_to_req, amount_left_to_write;
    727	loff_t			usb_offset, file_offset, file_offset_tmp;
    728	unsigned int		amount;
    729	ssize_t			nwritten;
    730	int			rc;
    731
    732	if (curlun->ro) {
    733		curlun->sense_data = SS_WRITE_PROTECTED;
    734		return -EINVAL;
    735	}
    736	spin_lock(&curlun->filp->f_lock);
    737	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
    738	spin_unlock(&curlun->filp->f_lock);
    739
    740	/*
    741	 * Get the starting Logical Block Address and check that it's
    742	 * not too big
    743	 */
    744	if (common->cmnd[0] == WRITE_6)
    745		lba = get_unaligned_be24(&common->cmnd[1]);
    746	else {
    747		if (common->cmnd[0] == WRITE_16)
    748			lba = get_unaligned_be64(&common->cmnd[2]);
    749		else		/* WRITE_10 or WRITE_12 */
    750			lba = get_unaligned_be32(&common->cmnd[2]);
    751
    752		/*
    753		 * We allow DPO (Disable Page Out = don't save data in the
    754		 * cache) and FUA (Force Unit Access = write directly to the
    755		 * medium).  We don't implement DPO; we implement FUA by
    756		 * performing synchronous output.
    757		 */
    758		if (common->cmnd[1] & ~0x18) {
    759			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
    760			return -EINVAL;
    761		}
    762		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
    763			spin_lock(&curlun->filp->f_lock);
    764			curlun->filp->f_flags |= O_SYNC;
    765			spin_unlock(&curlun->filp->f_lock);
    766		}
    767	}
    768	if (lba >= curlun->num_sectors) {
    769		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    770		return -EINVAL;
    771	}
    772
    773	/* Carry out the file writes */
    774	get_some_more = 1;
    775	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
    776	amount_left_to_req = common->data_size_from_cmnd;
    777	amount_left_to_write = common->data_size_from_cmnd;
    778
    779	while (amount_left_to_write > 0) {
    780
    781		/* Queue a request for more data from the host */
    782		bh = common->next_buffhd_to_fill;
    783		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
    784
    785			/*
    786			 * Figure out how much we want to get:
    787			 * Try to get the remaining amount,
    788			 * but not more than the buffer size.
    789			 */
    790			amount = min(amount_left_to_req, FSG_BUFLEN);
    791
    792			/* Beyond the end of the backing file? */
    793			if (usb_offset >= curlun->file_length) {
    794				get_some_more = 0;
    795				curlun->sense_data =
    796					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    797				curlun->sense_data_info =
    798					usb_offset >> curlun->blkbits;
    799				curlun->info_valid = 1;
    800				continue;
    801			}
    802
    803			/* Get the next buffer */
    804			usb_offset += amount;
    805			common->usb_amount_left -= amount;
    806			amount_left_to_req -= amount;
    807			if (amount_left_to_req == 0)
    808				get_some_more = 0;
    809
    810			/*
    811			 * Except at the end of the transfer, amount will be
    812			 * equal to the buffer size, which is divisible by
    813			 * the bulk-out maxpacket size.
    814			 */
    815			set_bulk_out_req_length(common, bh, amount);
    816			if (!start_out_transfer(common, bh))
    817				/* Don't know what to do if common->fsg is NULL */
    818				return -EIO;
    819			common->next_buffhd_to_fill = bh->next;
    820			continue;
    821		}
    822
    823		/* Write the received data to the backing file */
    824		bh = common->next_buffhd_to_drain;
    825		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
    826			break;			/* We stopped early */
    827
    828		/* Wait for the data to be received */
    829		rc = sleep_thread(common, false, bh);
    830		if (rc)
    831			return rc;
    832
    833		common->next_buffhd_to_drain = bh->next;
    834		bh->state = BUF_STATE_EMPTY;
    835
    836		/* Did something go wrong with the transfer? */
    837		if (bh->outreq->status != 0) {
    838			curlun->sense_data = SS_COMMUNICATION_FAILURE;
    839			curlun->sense_data_info =
    840					file_offset >> curlun->blkbits;
    841			curlun->info_valid = 1;
    842			break;
    843		}
    844
    845		amount = bh->outreq->actual;
    846		if (curlun->file_length - file_offset < amount) {
    847			LERROR(curlun, "write %u @ %llu beyond end %llu\n",
    848				       amount, (unsigned long long)file_offset,
    849				       (unsigned long long)curlun->file_length);
    850			amount = curlun->file_length - file_offset;
    851		}
    852
    853		/*
    854		 * Don't accept excess data.  The spec doesn't say
    855		 * what to do in this case.  We'll ignore the error.
    856		 */
    857		amount = min(amount, bh->bulk_out_intended_length);
    858
    859		/* Don't write a partial block */
    860		amount = round_down(amount, curlun->blksize);
    861		if (amount == 0)
    862			goto empty_write;
    863
    864		/* Perform the write */
    865		file_offset_tmp = file_offset;
    866		nwritten = kernel_write(curlun->filp, bh->buf, amount,
    867				&file_offset_tmp);
    868		VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
    869				(unsigned long long)file_offset, (int)nwritten);
    870		if (signal_pending(current))
    871			return -EINTR;		/* Interrupted! */
    872
    873		if (nwritten < 0) {
    874			LDBG(curlun, "error in file write: %d\n",
    875					(int) nwritten);
    876			nwritten = 0;
    877		} else if (nwritten < amount) {
    878			LDBG(curlun, "partial file write: %d/%u\n",
    879					(int) nwritten, amount);
    880			nwritten = round_down(nwritten, curlun->blksize);
    881		}
    882		file_offset += nwritten;
    883		amount_left_to_write -= nwritten;
    884		common->residue -= nwritten;
    885
    886		/* If an error occurred, report it and its position */
    887		if (nwritten < amount) {
    888			curlun->sense_data = SS_WRITE_ERROR;
    889			curlun->sense_data_info =
    890					file_offset >> curlun->blkbits;
    891			curlun->info_valid = 1;
    892			break;
    893		}
    894
    895 empty_write:
    896		/* Did the host decide to stop early? */
    897		if (bh->outreq->actual < bh->bulk_out_intended_length) {
    898			common->short_packet_received = 1;
    899			break;
    900		}
    901	}
    902
    903	return -EIO;		/* No default reply */
    904}
    905
    906
    907/*-------------------------------------------------------------------------*/
    908
    909static int do_synchronize_cache(struct fsg_common *common)
    910{
    911	struct fsg_lun	*curlun = common->curlun;
    912	int		rc;
    913
     914	/* We ignore the requested LBA and write out all of the file's
     915	 * dirty data buffers. */
    916	rc = fsg_lun_fsync_sub(curlun);
    917	if (rc)
    918		curlun->sense_data = SS_WRITE_ERROR;
    919	return 0;
    920}
    921
    922
    923/*-------------------------------------------------------------------------*/
    924
    925static void invalidate_sub(struct fsg_lun *curlun)
    926{
    927	struct file	*filp = curlun->filp;
    928	struct inode	*inode = file_inode(filp);
    929	unsigned long	rc;
    930
    931	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
    932	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
    933}
    934
    935static int do_verify(struct fsg_common *common)
    936{
    937	struct fsg_lun		*curlun = common->curlun;
    938	u32			lba;
    939	u32			verification_length;
    940	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
    941	loff_t			file_offset, file_offset_tmp;
    942	u32			amount_left;
    943	unsigned int		amount;
    944	ssize_t			nread;
    945
    946	/*
    947	 * Get the starting Logical Block Address and check that it's
    948	 * not too big.
    949	 */
    950	lba = get_unaligned_be32(&common->cmnd[2]);
    951	if (lba >= curlun->num_sectors) {
    952		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    953		return -EINVAL;
    954	}
    955
    956	/*
    957	 * We allow DPO (Disable Page Out = don't save data in the
    958	 * cache) but we don't implement it.
    959	 */
    960	if (common->cmnd[1] & ~0x10) {
    961		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
    962		return -EINVAL;
    963	}
    964
    965	verification_length = get_unaligned_be16(&common->cmnd[7]);
    966	if (unlikely(verification_length == 0))
    967		return -EIO;		/* No default reply */
    968
    969	/* Prepare to carry out the file verify */
    970	amount_left = verification_length << curlun->blkbits;
    971	file_offset = ((loff_t) lba) << curlun->blkbits;
    972
    973	/* Write out all the dirty buffers before invalidating them */
    974	fsg_lun_fsync_sub(curlun);
    975	if (signal_pending(current))
    976		return -EINTR;
    977
    978	invalidate_sub(curlun);
    979	if (signal_pending(current))
    980		return -EINTR;
    981
    982	/* Just try to read the requested blocks */
    983	while (amount_left > 0) {
    984		/*
    985		 * Figure out how much we need to read:
    986		 * Try to read the remaining amount, but not more than
    987		 * the buffer size.
    988		 * And don't try to read past the end of the file.
    989		 */
    990		amount = min(amount_left, FSG_BUFLEN);
    991		amount = min((loff_t)amount,
    992			     curlun->file_length - file_offset);
    993		if (amount == 0) {
    994			curlun->sense_data =
    995					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
    996			curlun->sense_data_info =
    997				file_offset >> curlun->blkbits;
    998			curlun->info_valid = 1;
    999			break;
   1000		}
   1001
   1002		/* Perform the read */
   1003		file_offset_tmp = file_offset;
   1004		nread = kernel_read(curlun->filp, bh->buf, amount,
   1005				&file_offset_tmp);
   1006		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
   1007				(unsigned long long) file_offset,
   1008				(int) nread);
   1009		if (signal_pending(current))
   1010			return -EINTR;
   1011
   1012		if (nread < 0) {
   1013			LDBG(curlun, "error in file verify: %d\n", (int)nread);
   1014			nread = 0;
   1015		} else if (nread < amount) {
   1016			LDBG(curlun, "partial file verify: %d/%u\n",
   1017			     (int)nread, amount);
   1018			nread = round_down(nread, curlun->blksize);
   1019		}
   1020		if (nread == 0) {
   1021			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
   1022			curlun->sense_data_info =
   1023				file_offset >> curlun->blkbits;
   1024			curlun->info_valid = 1;
   1025			break;
   1026		}
   1027		file_offset += nread;
   1028		amount_left -= nread;
   1029	}
   1030	return 0;
   1031}
   1032
   1033
   1034/*-------------------------------------------------------------------------*/
   1035
   1036static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
   1037{
   1038	struct fsg_lun *curlun = common->curlun;
   1039	u8	*buf = (u8 *) bh->buf;
   1040
   1041	if (!curlun) {		/* Unsupported LUNs are okay */
   1042		common->bad_lun_okay = 1;
   1043		memset(buf, 0, 36);
   1044		buf[0] = TYPE_NO_LUN;	/* Unsupported, no device-type */
   1045		buf[4] = 31;		/* Additional length */
   1046		return 36;
   1047	}
   1048
   1049	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
   1050	buf[1] = curlun->removable ? 0x80 : 0;
   1051	buf[2] = 2;		/* ANSI SCSI level 2 */
   1052	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
   1053	buf[4] = 31;		/* Additional length */
   1054	buf[5] = 0;		/* No special options */
   1055	buf[6] = 0;
   1056	buf[7] = 0;
   1057	if (curlun->inquiry_string[0])
   1058		memcpy(buf + 8, curlun->inquiry_string,
   1059		       sizeof(curlun->inquiry_string));
   1060	else
   1061		memcpy(buf + 8, common->inquiry_string,
   1062		       sizeof(common->inquiry_string));
   1063	return 36;
   1064}
   1065
   1066static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
   1067{
   1068	struct fsg_lun	*curlun = common->curlun;
   1069	u8		*buf = (u8 *) bh->buf;
   1070	u32		sd, sdinfo;
   1071	int		valid;
   1072
   1073	/*
   1074	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
   1075	 *
   1076	 * If a REQUEST SENSE command is received from an initiator
   1077	 * with a pending unit attention condition (before the target
   1078	 * generates the contingent allegiance condition), then the
   1079	 * target shall either:
   1080	 *   a) report any pending sense data and preserve the unit
   1081	 *	attention condition on the logical unit, or,
   1082	 *   b) report the unit attention condition, may discard any
   1083	 *	pending sense data, and clear the unit attention
   1084	 *	condition on the logical unit for that initiator.
   1085	 *
   1086	 * FSG normally uses option a); enable this code to use option b).
   1087	 */
   1088#if 0
   1089	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
   1090		curlun->sense_data = curlun->unit_attention_data;
   1091		curlun->unit_attention_data = SS_NO_SENSE;
   1092	}
   1093#endif
   1094
   1095	if (!curlun) {		/* Unsupported LUNs are okay */
   1096		common->bad_lun_okay = 1;
   1097		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
   1098		sdinfo = 0;
   1099		valid = 0;
   1100	} else {
   1101		sd = curlun->sense_data;
   1102		sdinfo = curlun->sense_data_info;
   1103		valid = curlun->info_valid << 7;
   1104		curlun->sense_data = SS_NO_SENSE;
   1105		curlun->sense_data_info = 0;
   1106		curlun->info_valid = 0;
   1107	}
   1108
   1109	memset(buf, 0, 18);
   1110	buf[0] = valid | 0x70;			/* Valid, current error */
   1111	buf[2] = SK(sd);
   1112	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
   1113	buf[7] = 18 - 8;			/* Additional sense length */
   1114	buf[12] = ASC(sd);
   1115	buf[13] = ASCQ(sd);
   1116	return 18;
   1117}
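Editorial aside: the 18-byte fixed-format sense buffer built above, shown for a removable LUN whose medium is not loaded (assuming the SS_MEDIUM_NOT_PRESENT encoding 0x023a00 from storage_common.h, i.e. SK/ASC/ASCQ = 0x02/0x3a/0x00):

	buf[0]  = 0x70	/* current error, INFORMATION field not valid */
	buf[2]  = 0x02	/* sense key: NOT READY */
	buf[3..6] = 0	/* sense information (LBA of the failure, when valid) */
	buf[7]  = 0x0a	/* additional sense length = 18 - 8 */
	buf[12] = 0x3a	/* ASC:  medium not present */
	buf[13] = 0x00	/* ASCQ */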
   1118
   1119static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
   1120{
   1121	struct fsg_lun	*curlun = common->curlun;
   1122	u32		lba = get_unaligned_be32(&common->cmnd[2]);
   1123	int		pmi = common->cmnd[8];
   1124	u8		*buf = (u8 *)bh->buf;
   1125	u32		max_lba;
   1126
   1127	/* Check the PMI and LBA fields */
   1128	if (pmi > 1 || (pmi == 0 && lba != 0)) {
   1129		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1130		return -EINVAL;
   1131	}
   1132
   1133	if (curlun->num_sectors < 0x100000000ULL)
   1134		max_lba = curlun->num_sectors - 1;
   1135	else
   1136		max_lba = 0xffffffff;
   1137	put_unaligned_be32(max_lba, &buf[0]);		/* Max logical block */
   1138	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
   1139	return 8;
   1140}
   1141
   1142static int do_read_capacity_16(struct fsg_common *common, struct fsg_buffhd *bh)
   1143{
   1144	struct fsg_lun  *curlun = common->curlun;
   1145	u64		lba = get_unaligned_be64(&common->cmnd[2]);
   1146	int		pmi = common->cmnd[14];
   1147	u8		*buf = (u8 *)bh->buf;
   1148
   1149	/* Check the PMI and LBA fields */
   1150	if (pmi > 1 || (pmi == 0 && lba != 0)) {
   1151		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1152		return -EINVAL;
   1153	}
   1154
   1155	put_unaligned_be64(curlun->num_sectors - 1, &buf[0]);
   1156							/* Max logical block */
   1157	put_unaligned_be32(curlun->blksize, &buf[8]);	/* Block length */
   1158
   1159	/* It is safe to keep other fields zeroed */
   1160	memset(&buf[12], 0, 32 - 12);
   1161	return 32;
   1162}
   1163
   1164static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
   1165{
   1166	struct fsg_lun	*curlun = common->curlun;
   1167	int		msf = common->cmnd[1] & 0x02;
   1168	u32		lba = get_unaligned_be32(&common->cmnd[2]);
   1169	u8		*buf = (u8 *)bh->buf;
   1170
   1171	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
   1172		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1173		return -EINVAL;
   1174	}
   1175	if (lba >= curlun->num_sectors) {
   1176		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
   1177		return -EINVAL;
   1178	}
   1179
   1180	memset(buf, 0, 8);
   1181	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
   1182	store_cdrom_address(&buf[4], msf, lba);
   1183	return 8;
   1184}
   1185
   1186static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
   1187{
   1188	struct fsg_lun	*curlun = common->curlun;
   1189	int		msf = common->cmnd[1] & 0x02;
   1190	int		start_track = common->cmnd[6];
   1191	u8		*buf = (u8 *)bh->buf;
   1192	u8		format;
   1193	int		i, len;
   1194
   1195	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
   1196			start_track > 1) {
   1197		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1198		return -EINVAL;
   1199	}
   1200
   1201	format = common->cmnd[2] & 0xf;
   1202	/*
    1203	 * Check if the CDB is old-style SFF-8020i,
    1204	 * i.e. the format is in the 2 MSBs of byte 9.
    1205	 * Mac OS X hosts send us this.
   1206	 */
   1207	if (format == 0)
   1208		format = (common->cmnd[9] >> 6) & 0x3;
   1209
   1210	switch (format) {
   1211	case 0:
   1212		/* Formatted TOC */
   1213		len = 4 + 2*8;		/* 4 byte header + 2 descriptors */
   1214		memset(buf, 0, len);
   1215		buf[1] = len - 2;	/* TOC Length excludes length field */
   1216		buf[2] = 1;		/* First track number */
   1217		buf[3] = 1;		/* Last track number */
   1218		buf[5] = 0x16;		/* Data track, copying allowed */
   1219		buf[6] = 0x01;		/* Only track is number 1 */
   1220		store_cdrom_address(&buf[8], msf, 0);
   1221
   1222		buf[13] = 0x16;		/* Lead-out track is data */
   1223		buf[14] = 0xAA;		/* Lead-out track number */
   1224		store_cdrom_address(&buf[16], msf, curlun->num_sectors);
   1225		return len;
   1226
   1227	case 2:
   1228		/* Raw TOC */
   1229		len = 4 + 3*11;		/* 4 byte header + 3 descriptors */
   1230		memset(buf, 0, len);	/* Header + A0, A1 & A2 descriptors */
   1231		buf[1] = len - 2;	/* TOC Length excludes length field */
   1232		buf[2] = 1;		/* First complete session */
   1233		buf[3] = 1;		/* Last complete session */
   1234
   1235		buf += 4;
   1236		/* fill in A0, A1 and A2 points */
   1237		for (i = 0; i < 3; i++) {
   1238			buf[0] = 1;	/* Session number */
   1239			buf[1] = 0x16;	/* Data track, copying allowed */
   1240			/* 2 - Track number 0 ->  TOC */
   1241			buf[3] = 0xA0 + i; /* A0, A1, A2 point */
   1242			/* 4, 5, 6 - Min, sec, frame is zero */
   1243			buf[8] = 1;	/* Pmin: last track number */
   1244			buf += 11;	/* go to next track descriptor */
   1245		}
   1246		buf -= 11;		/* go back to A2 descriptor */
   1247
   1248		/* For A2, 7, 8, 9, 10 - zero, Pmin, Psec, Pframe of Lead out */
   1249		store_cdrom_address(&buf[7], msf, curlun->num_sectors);
   1250		return len;
   1251
   1252	default:
   1253		/* Multi-session, PMA, ATIP, CD-TEXT not supported/required */
   1254		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1255		return -EINVAL;
   1256	}
   1257}
   1258
   1259static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
   1260{
   1261	struct fsg_lun	*curlun = common->curlun;
   1262	int		mscmnd = common->cmnd[0];
   1263	u8		*buf = (u8 *) bh->buf;
   1264	u8		*buf0 = buf;
   1265	int		pc, page_code;
   1266	int		changeable_values, all_pages;
   1267	int		valid_page = 0;
   1268	int		len, limit;
   1269
   1270	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
   1271		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1272		return -EINVAL;
   1273	}
   1274	pc = common->cmnd[2] >> 6;
   1275	page_code = common->cmnd[2] & 0x3f;
   1276	if (pc == 3) {
   1277		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
   1278		return -EINVAL;
   1279	}
   1280	changeable_values = (pc == 1);
   1281	all_pages = (page_code == 0x3f);
   1282
   1283	/*
   1284	 * Write the mode parameter header.  Fixed values are: default
   1285	 * medium type, no cache control (DPOFUA), and no block descriptors.
   1286	 * The only variable value is the WriteProtect bit.  We will fill in
   1287	 * the mode data length later.
   1288	 */
   1289	memset(buf, 0, 8);
   1290	if (mscmnd == MODE_SENSE) {
   1291		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
   1292		buf += 4;
   1293		limit = 255;
   1294	} else {			/* MODE_SENSE_10 */
   1295		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
   1296		buf += 8;
   1297		limit = 65535;		/* Should really be FSG_BUFLEN */
   1298	}
   1299
   1300	/* No block descriptors */
   1301
   1302	/*
   1303	 * The mode pages, in numerical order.  The only page we support
   1304	 * is the Caching page.
   1305	 */
   1306	if (page_code == 0x08 || all_pages) {
   1307		valid_page = 1;
   1308		buf[0] = 0x08;		/* Page code */
   1309		buf[1] = 10;		/* Page length */
   1310		memset(buf+2, 0, 10);	/* None of the fields are changeable */
   1311
   1312		if (!changeable_values) {
   1313			buf[2] = 0x04;	/* Write cache enable, */
   1314					/* Read cache not disabled */
   1315					/* No cache retention priorities */
   1316			put_unaligned_be16(0xffff, &buf[4]);
   1317					/* Don't disable prefetch */
   1318					/* Minimum prefetch = 0 */
   1319			put_unaligned_be16(0xffff, &buf[8]);
   1320					/* Maximum prefetch */
   1321			put_unaligned_be16(0xffff, &buf[10]);
   1322					/* Maximum prefetch ceiling */
   1323		}
   1324		buf += 12;
   1325	}
   1326
   1327	/*
   1328	 * Check that a valid page was requested and the mode data length
   1329	 * isn't too long.
   1330	 */
   1331	len = buf - buf0;
   1332	if (!valid_page || len > limit) {
   1333		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1334		return -EINVAL;
   1335	}
   1336
   1337	/*  Store the mode data length */
   1338	if (mscmnd == MODE_SENSE)
   1339		buf0[0] = len - 1;
   1340	else
   1341		put_unaligned_be16(len - 2, buf0);
   1342	return len;
   1343}
   1344
   1345static int do_start_stop(struct fsg_common *common)
   1346{
   1347	struct fsg_lun	*curlun = common->curlun;
   1348	int		loej, start;
   1349
   1350	if (!curlun) {
   1351		return -EINVAL;
   1352	} else if (!curlun->removable) {
   1353		curlun->sense_data = SS_INVALID_COMMAND;
   1354		return -EINVAL;
   1355	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
   1356		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
   1357		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1358		return -EINVAL;
   1359	}
   1360
   1361	loej  = common->cmnd[4] & 0x02;
   1362	start = common->cmnd[4] & 0x01;
   1363
   1364	/*
   1365	 * Our emulation doesn't support mounting; the medium is
   1366	 * available for use as soon as it is loaded.
   1367	 */
   1368	if (start) {
   1369		if (!fsg_lun_is_open(curlun)) {
   1370			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
   1371			return -EINVAL;
   1372		}
   1373		return 0;
   1374	}
   1375
   1376	/* Are we allowed to unload the media? */
   1377	if (curlun->prevent_medium_removal) {
   1378		LDBG(curlun, "unload attempt prevented\n");
   1379		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
   1380		return -EINVAL;
   1381	}
   1382
   1383	if (!loej)
   1384		return 0;
   1385
   1386	up_read(&common->filesem);
   1387	down_write(&common->filesem);
   1388	fsg_lun_close(curlun);
   1389	up_write(&common->filesem);
   1390	down_read(&common->filesem);
   1391
   1392	return 0;
   1393}
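Editorial note: decoded, the CDB byte handled above means cmnd[4] == 0x01 (START=1) merely verifies that a medium is loaded, since this emulation has nothing to spin up, while cmnd[4] == 0x02 (LOEJ=1, START=0) closes the backing file via fsg_lun_close(), i.e. "ejects" the medium, provided removal has not been prevented with PREVENT ALLOW MEDIUM REMOVAL.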
   1394
   1395static int do_prevent_allow(struct fsg_common *common)
   1396{
   1397	struct fsg_lun	*curlun = common->curlun;
   1398	int		prevent;
   1399
   1400	if (!common->curlun) {
   1401		return -EINVAL;
   1402	} else if (!common->curlun->removable) {
   1403		common->curlun->sense_data = SS_INVALID_COMMAND;
   1404		return -EINVAL;
   1405	}
   1406
   1407	prevent = common->cmnd[4] & 0x01;
   1408	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
   1409		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1410		return -EINVAL;
   1411	}
   1412
   1413	if (curlun->prevent_medium_removal && !prevent)
   1414		fsg_lun_fsync_sub(curlun);
   1415	curlun->prevent_medium_removal = prevent;
   1416	return 0;
   1417}
   1418
   1419static int do_read_format_capacities(struct fsg_common *common,
   1420			struct fsg_buffhd *bh)
   1421{
   1422	struct fsg_lun	*curlun = common->curlun;
   1423	u8		*buf = (u8 *) bh->buf;
   1424
   1425	buf[0] = buf[1] = buf[2] = 0;
   1426	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
   1427	buf += 4;
   1428
   1429	put_unaligned_be32(curlun->num_sectors, &buf[0]);
   1430						/* Number of blocks */
   1431	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
   1432	buf[4] = 0x02;				/* Current capacity */
   1433	return 12;
   1434}
   1435
   1436static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
   1437{
   1438	struct fsg_lun	*curlun = common->curlun;
   1439
   1440	/* We don't support MODE SELECT */
   1441	if (curlun)
   1442		curlun->sense_data = SS_INVALID_COMMAND;
   1443	return -EINVAL;
   1444}
   1445
   1446
   1447/*-------------------------------------------------------------------------*/
   1448
   1449static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
   1450{
   1451	int	rc;
   1452
   1453	rc = fsg_set_halt(fsg, fsg->bulk_in);
   1454	if (rc == -EAGAIN)
   1455		VDBG(fsg, "delayed bulk-in endpoint halt\n");
   1456	while (rc != 0) {
   1457		if (rc != -EAGAIN) {
   1458			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
   1459			rc = 0;
   1460			break;
   1461		}
   1462
   1463		/* Wait for a short time and then try again */
   1464		if (msleep_interruptible(100) != 0)
   1465			return -EINTR;
   1466		rc = usb_ep_set_halt(fsg->bulk_in);
   1467	}
   1468	return rc;
   1469}
   1470
   1471static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
   1472{
   1473	int	rc;
   1474
   1475	DBG(fsg, "bulk-in set wedge\n");
   1476	rc = usb_ep_set_wedge(fsg->bulk_in);
   1477	if (rc == -EAGAIN)
   1478		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
   1479	while (rc != 0) {
   1480		if (rc != -EAGAIN) {
   1481			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
   1482			rc = 0;
   1483			break;
   1484		}
   1485
   1486		/* Wait for a short time and then try again */
   1487		if (msleep_interruptible(100) != 0)
   1488			return -EINTR;
   1489		rc = usb_ep_set_wedge(fsg->bulk_in);
   1490	}
   1491	return rc;
   1492}
   1493
   1494static int throw_away_data(struct fsg_common *common)
   1495{
   1496	struct fsg_buffhd	*bh, *bh2;
   1497	u32			amount;
   1498	int			rc;
   1499
   1500	for (bh = common->next_buffhd_to_drain;
   1501	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
   1502	     bh = common->next_buffhd_to_drain) {
   1503
   1504		/* Try to submit another request if we need one */
   1505		bh2 = common->next_buffhd_to_fill;
   1506		if (bh2->state == BUF_STATE_EMPTY &&
   1507				common->usb_amount_left > 0) {
   1508			amount = min(common->usb_amount_left, FSG_BUFLEN);
   1509
   1510			/*
   1511			 * Except at the end of the transfer, amount will be
   1512			 * equal to the buffer size, which is divisible by
   1513			 * the bulk-out maxpacket size.
   1514			 */
   1515			set_bulk_out_req_length(common, bh2, amount);
   1516			if (!start_out_transfer(common, bh2))
    1517				/* Don't know what to do if common->fsg is NULL */
   1518				return -EIO;
   1519			common->next_buffhd_to_fill = bh2->next;
   1520			common->usb_amount_left -= amount;
   1521			continue;
   1522		}
   1523
   1524		/* Wait for the data to be received */
   1525		rc = sleep_thread(common, false, bh);
   1526		if (rc)
   1527			return rc;
   1528
   1529		/* Throw away the data in a filled buffer */
   1530		bh->state = BUF_STATE_EMPTY;
   1531		common->next_buffhd_to_drain = bh->next;
   1532
   1533		/* A short packet or an error ends everything */
   1534		if (bh->outreq->actual < bh->bulk_out_intended_length ||
   1535				bh->outreq->status != 0) {
   1536			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
   1537			return -EINTR;
   1538		}
   1539	}
   1540	return 0;
   1541}
   1542
   1543static int finish_reply(struct fsg_common *common)
   1544{
   1545	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
   1546	int			rc = 0;
   1547
   1548	switch (common->data_dir) {
   1549	case DATA_DIR_NONE:
   1550		break;			/* Nothing to send */
   1551
   1552	/*
   1553	 * If we don't know whether the host wants to read or write,
   1554	 * this must be CB or CBI with an unknown command.  We mustn't
   1555	 * try to send or receive any data.  So stall both bulk pipes
   1556	 * if we can and wait for a reset.
   1557	 */
   1558	case DATA_DIR_UNKNOWN:
   1559		if (!common->can_stall) {
   1560			/* Nothing */
   1561		} else if (fsg_is_set(common)) {
   1562			fsg_set_halt(common->fsg, common->fsg->bulk_out);
   1563			rc = halt_bulk_in_endpoint(common->fsg);
   1564		} else {
   1565			/* Don't know what to do if common->fsg is NULL */
   1566			rc = -EIO;
   1567		}
   1568		break;
   1569
   1570	/* All but the last buffer of data must have already been sent */
   1571	case DATA_DIR_TO_HOST:
   1572		if (common->data_size == 0) {
   1573			/* Nothing to send */
   1574
   1575		/* Don't know what to do if common->fsg is NULL */
   1576		} else if (!fsg_is_set(common)) {
   1577			rc = -EIO;
   1578
   1579		/* If there's no residue, simply send the last buffer */
   1580		} else if (common->residue == 0) {
   1581			bh->inreq->zero = 0;
   1582			if (!start_in_transfer(common, bh))
   1583				return -EIO;
   1584			common->next_buffhd_to_fill = bh->next;
   1585
   1586		/*
   1587		 * For Bulk-only, mark the end of the data with a short
   1588		 * packet.  If we are allowed to stall, halt the bulk-in
   1589		 * endpoint.  (Note: This violates the Bulk-Only Transport
   1590		 * specification, which requires us to pad the data if we
   1591		 * don't halt the endpoint.  Presumably nobody will mind.)
   1592		 */
   1593		} else {
   1594			bh->inreq->zero = 1;
   1595			if (!start_in_transfer(common, bh))
   1596				rc = -EIO;
   1597			common->next_buffhd_to_fill = bh->next;
   1598			if (common->can_stall)
   1599				rc = halt_bulk_in_endpoint(common->fsg);
   1600		}
   1601		break;
   1602
   1603	/*
   1604	 * We have processed all we want from the data the host has sent.
   1605	 * There may still be outstanding bulk-out requests.
   1606	 */
   1607	case DATA_DIR_FROM_HOST:
   1608		if (common->residue == 0) {
   1609			/* Nothing to receive */
   1610
   1611		/* Did the host stop sending unexpectedly early? */
   1612		} else if (common->short_packet_received) {
   1613			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
   1614			rc = -EINTR;
   1615
   1616		/*
   1617		 * We haven't processed all the incoming data.  Even though
   1618		 * we may be allowed to stall, doing so would cause a race.
   1619		 * The controller may already have ACK'ed all the remaining
   1620		 * bulk-out packets, in which case the host wouldn't see a
   1621		 * STALL.  Not realizing the endpoint was halted, it wouldn't
   1622		 * clear the halt -- leading to problems later on.
   1623		 */
   1624#if 0
   1625		} else if (common->can_stall) {
   1626			if (fsg_is_set(common))
   1627				fsg_set_halt(common->fsg,
   1628					     common->fsg->bulk_out);
   1629			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
   1630			rc = -EINTR;
   1631#endif
   1632
   1633		/*
   1634		 * We can't stall.  Read in the excess data and throw it
   1635		 * all away.
   1636		 */
   1637		} else {
   1638			rc = throw_away_data(common);
   1639		}
   1640		break;
   1641	}
   1642	return rc;
   1643}
   1644
   1645static void send_status(struct fsg_common *common)
   1646{
   1647	struct fsg_lun		*curlun = common->curlun;
   1648	struct fsg_buffhd	*bh;
   1649	struct bulk_cs_wrap	*csw;
   1650	int			rc;
   1651	u8			status = US_BULK_STAT_OK;
   1652	u32			sd, sdinfo = 0;
   1653
   1654	/* Wait for the next buffer to become available */
   1655	bh = common->next_buffhd_to_fill;
   1656	rc = sleep_thread(common, false, bh);
   1657	if (rc)
   1658		return;
   1659
   1660	if (curlun) {
   1661		sd = curlun->sense_data;
   1662		sdinfo = curlun->sense_data_info;
   1663	} else if (common->bad_lun_okay)
   1664		sd = SS_NO_SENSE;
   1665	else
   1666		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
   1667
   1668	if (common->phase_error) {
   1669		DBG(common, "sending phase-error status\n");
   1670		status = US_BULK_STAT_PHASE;
   1671		sd = SS_INVALID_COMMAND;
   1672	} else if (sd != SS_NO_SENSE) {
   1673		DBG(common, "sending command-failure status\n");
   1674		status = US_BULK_STAT_FAIL;
   1675		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
   1676				"  info x%x\n",
   1677				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
   1678	}
   1679
   1680	/* Store and send the Bulk-only CSW */
   1681	csw = (void *)bh->buf;
   1682
   1683	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
   1684	csw->Tag = common->tag;
   1685	csw->Residue = cpu_to_le32(common->residue);
   1686	csw->Status = status;
   1687
   1688	bh->inreq->length = US_BULK_CS_WRAP_LEN;
   1689	bh->inreq->zero = 0;
   1690	if (!start_in_transfer(common, bh))
   1691		/* Don't know what to do if common->fsg is NULL */
   1692		return;
   1693
   1694	common->next_buffhd_to_fill = bh->next;
   1695	return;
   1696}
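
/*
 * The CSW assembled above is the 13-byte Bulk-only status wrapper: the
 * 'USBS' signature, the Tag echoed from the matching CBW, the Residue
 * (the difference between the CBW's DataTransferLength and the amount
 * actually processed) and a one-byte Status.  It always goes out on the
 * bulk-in endpoint, even for OUT commands.
 */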
   1697
   1698
   1699/*-------------------------------------------------------------------------*/
   1700
   1701/*
   1702 * Check whether the command is properly formed and whether its data size
   1703 * and direction agree with the values we already have.
   1704 */
   1705static int check_command(struct fsg_common *common, int cmnd_size,
   1706			 enum data_direction data_dir, unsigned int mask,
   1707			 int needs_medium, const char *name)
   1708{
   1709	int			i;
   1710	unsigned int		lun = common->cmnd[1] >> 5;
   1711	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
   1712	char			hdlen[20];
   1713	struct fsg_lun		*curlun;
   1714
   1715	hdlen[0] = 0;
   1716	if (common->data_dir != DATA_DIR_UNKNOWN)
   1717		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
   1718			common->data_size);
   1719	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
   1720	     name, cmnd_size, dirletter[(int) data_dir],
   1721	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
   1722
   1723	/*
   1724	 * We can't reply at all until we know the correct data direction
   1725	 * and size.
   1726	 */
   1727	if (common->data_size_from_cmnd == 0)
   1728		data_dir = DATA_DIR_NONE;
   1729	if (common->data_size < common->data_size_from_cmnd) {
   1730		/*
   1731		 * Host data size < Device data size is a phase error.
   1732		 * Carry out the command, but only transfer as much as
   1733		 * we are allowed.
   1734		 */
   1735		common->data_size_from_cmnd = common->data_size;
   1736		common->phase_error = 1;
   1737	}
   1738	common->residue = common->data_size;
   1739	common->usb_amount_left = common->data_size;
   1740
   1741	/* Conflicting data directions is a phase error */
   1742	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
   1743		common->phase_error = 1;
   1744		return -EINVAL;
   1745	}
   1746
   1747	/* Verify the length of the command itself */
   1748	if (cmnd_size != common->cmnd_size) {
   1749
   1750		/*
   1751		 * Special case workaround: There are plenty of buggy SCSI
   1752		 * implementations. Many have issues with cbw->Length
   1753		 * field passing a wrong command size. For those cases we
   1754		 * always try to work around the problem by using the length
   1755		 * sent by the host side provided it is at least as large
   1756		 * as the correct command length.
   1757		 * Examples of such cases would be MS-Windows, which issues
   1758		 * REQUEST SENSE with cbw->Length == 12 where it should
   1759		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
   1760		 * REQUEST SENSE with cbw->Length == 10 where it should
   1761		 * be 6 as well.
   1762		 */
   1763		if (cmnd_size <= common->cmnd_size) {
   1764			DBG(common, "%s is buggy! Expected length %d "
   1765			    "but we got %d\n", name,
   1766			    cmnd_size, common->cmnd_size);
   1767			cmnd_size = common->cmnd_size;
   1768		} else {
   1769			common->phase_error = 1;
   1770			return -EINVAL;
   1771		}
   1772	}
   1773
   1774	/* Check that the LUN values are consistent */
   1775	if (common->lun != lun)
   1776		DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
   1777		    common->lun, lun);
   1778
   1779	/* Check the LUN */
   1780	curlun = common->curlun;
   1781	if (curlun) {
   1782		if (common->cmnd[0] != REQUEST_SENSE) {
   1783			curlun->sense_data = SS_NO_SENSE;
   1784			curlun->sense_data_info = 0;
   1785			curlun->info_valid = 0;
   1786		}
   1787	} else {
   1788		common->bad_lun_okay = 0;
   1789
   1790		/*
   1791		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
   1792		 * to use unsupported LUNs; all others may not.
   1793		 */
   1794		if (common->cmnd[0] != INQUIRY &&
   1795		    common->cmnd[0] != REQUEST_SENSE) {
   1796			DBG(common, "unsupported LUN %u\n", common->lun);
   1797			return -EINVAL;
   1798		}
   1799	}
   1800
   1801	/*
   1802	 * If a unit attention condition exists, only INQUIRY and
   1803	 * REQUEST SENSE commands are allowed; anything else must fail.
   1804	 */
   1805	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
   1806	    common->cmnd[0] != INQUIRY &&
   1807	    common->cmnd[0] != REQUEST_SENSE) {
   1808		curlun->sense_data = curlun->unit_attention_data;
   1809		curlun->unit_attention_data = SS_NO_SENSE;
   1810		return -EINVAL;
   1811	}
   1812
   1813	/* Check that only command bytes listed in the mask are non-zero */
   1814	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
   1815	for (i = 1; i < cmnd_size; ++i) {
   1816		if (common->cmnd[i] && !(mask & (1 << i))) {
   1817			if (curlun)
   1818				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
   1819			return -EINVAL;
   1820		}
   1821	}
   1822
   1823	/* If the medium isn't mounted and the command needs to access
   1824	 * it, return an error. */
   1825	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
   1826		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
   1827		return -EINVAL;
   1828	}
   1829
   1830	return 0;
   1831}
   1832
   1833/* wrapper of check_command for data size in blocks handling */
   1834static int check_command_size_in_blocks(struct fsg_common *common,
   1835		int cmnd_size, enum data_direction data_dir,
   1836		unsigned int mask, int needs_medium, const char *name)
   1837{
   1838	if (common->curlun)
   1839		common->data_size_from_cmnd <<= common->curlun->blkbits;
   1840	return check_command(common, cmnd_size, data_dir,
   1841			mask, needs_medium, name);
   1842}
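
/*
 * Example: a READ(10) with TRANSFER LENGTH 8 on a LUN backed by 512-byte
 * blocks (blkbits == 9) turns data_size_from_cmnd from 8 blocks into
 * 8 << 9 == 4096 bytes before check_command() compares it against the
 * DataTransferLength the host announced in the CBW.
 */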
   1843
   1844static int do_scsi_command(struct fsg_common *common)
   1845{
   1846	struct fsg_buffhd	*bh;
   1847	int			rc;
   1848	int			reply = -EINVAL;
   1849	int			i;
   1850	static char		unknown[16];
   1851
   1852	dump_cdb(common);
   1853
   1854	/* Wait for the next buffer to become available for data or status */
   1855	bh = common->next_buffhd_to_fill;
   1856	common->next_buffhd_to_drain = bh;
   1857	rc = sleep_thread(common, false, bh);
   1858	if (rc)
   1859		return rc;
   1860
   1861	common->phase_error = 0;
   1862	common->short_packet_received = 0;
   1863
   1864	down_read(&common->filesem);	/* We're using the backing file */
   1865	switch (common->cmnd[0]) {
   1866
   1867	case INQUIRY:
   1868		common->data_size_from_cmnd = common->cmnd[4];
   1869		reply = check_command(common, 6, DATA_DIR_TO_HOST,
   1870				      (1<<4), 0,
   1871				      "INQUIRY");
   1872		if (reply == 0)
   1873			reply = do_inquiry(common, bh);
   1874		break;
   1875
   1876	case MODE_SELECT:
   1877		common->data_size_from_cmnd = common->cmnd[4];
   1878		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
   1879				      (1<<1) | (1<<4), 0,
   1880				      "MODE SELECT(6)");
   1881		if (reply == 0)
   1882			reply = do_mode_select(common, bh);
   1883		break;
   1884
   1885	case MODE_SELECT_10:
   1886		common->data_size_from_cmnd =
   1887			get_unaligned_be16(&common->cmnd[7]);
   1888		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
   1889				      (1<<1) | (3<<7), 0,
   1890				      "MODE SELECT(10)");
   1891		if (reply == 0)
   1892			reply = do_mode_select(common, bh);
   1893		break;
   1894
   1895	case MODE_SENSE:
   1896		common->data_size_from_cmnd = common->cmnd[4];
   1897		reply = check_command(common, 6, DATA_DIR_TO_HOST,
   1898				      (1<<1) | (1<<2) | (1<<4), 0,
   1899				      "MODE SENSE(6)");
   1900		if (reply == 0)
   1901			reply = do_mode_sense(common, bh);
   1902		break;
   1903
   1904	case MODE_SENSE_10:
   1905		common->data_size_from_cmnd =
   1906			get_unaligned_be16(&common->cmnd[7]);
   1907		reply = check_command(common, 10, DATA_DIR_TO_HOST,
   1908				      (1<<1) | (1<<2) | (3<<7), 0,
   1909				      "MODE SENSE(10)");
   1910		if (reply == 0)
   1911			reply = do_mode_sense(common, bh);
   1912		break;
   1913
   1914	case ALLOW_MEDIUM_REMOVAL:
   1915		common->data_size_from_cmnd = 0;
   1916		reply = check_command(common, 6, DATA_DIR_NONE,
   1917				      (1<<4), 0,
   1918				      "PREVENT-ALLOW MEDIUM REMOVAL");
   1919		if (reply == 0)
   1920			reply = do_prevent_allow(common);
   1921		break;
   1922
   1923	case READ_6:
   1924		i = common->cmnd[4];
   1925		common->data_size_from_cmnd = (i == 0) ? 256 : i;
   1926		reply = check_command_size_in_blocks(common, 6,
   1927				      DATA_DIR_TO_HOST,
   1928				      (7<<1) | (1<<4), 1,
   1929				      "READ(6)");
   1930		if (reply == 0)
   1931			reply = do_read(common);
   1932		break;
   1933
   1934	case READ_10:
   1935		common->data_size_from_cmnd =
   1936				get_unaligned_be16(&common->cmnd[7]);
   1937		reply = check_command_size_in_blocks(common, 10,
   1938				      DATA_DIR_TO_HOST,
   1939				      (1<<1) | (0xf<<2) | (3<<7), 1,
   1940				      "READ(10)");
   1941		if (reply == 0)
   1942			reply = do_read(common);
   1943		break;
   1944
   1945	case READ_12:
   1946		common->data_size_from_cmnd =
   1947				get_unaligned_be32(&common->cmnd[6]);
   1948		reply = check_command_size_in_blocks(common, 12,
   1949				      DATA_DIR_TO_HOST,
   1950				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
   1951				      "READ(12)");
   1952		if (reply == 0)
   1953			reply = do_read(common);
   1954		break;
   1955
   1956	case READ_16:
   1957		common->data_size_from_cmnd =
   1958				get_unaligned_be32(&common->cmnd[10]);
   1959		reply = check_command_size_in_blocks(common, 16,
   1960				      DATA_DIR_TO_HOST,
   1961				      (1<<1) | (0xff<<2) | (0xf<<10), 1,
   1962				      "READ(16)");
   1963		if (reply == 0)
   1964			reply = do_read(common);
   1965		break;
   1966
   1967	case READ_CAPACITY:
   1968		common->data_size_from_cmnd = 8;
   1969		reply = check_command(common, 10, DATA_DIR_TO_HOST,
   1970				      (0xf<<2) | (1<<8), 1,
   1971				      "READ CAPACITY");
   1972		if (reply == 0)
   1973			reply = do_read_capacity(common, bh);
   1974		break;
   1975
   1976	case READ_HEADER:
   1977		if (!common->curlun || !common->curlun->cdrom)
   1978			goto unknown_cmnd;
   1979		common->data_size_from_cmnd =
   1980			get_unaligned_be16(&common->cmnd[7]);
   1981		reply = check_command(common, 10, DATA_DIR_TO_HOST,
   1982				      (3<<7) | (0x1f<<1), 1,
   1983				      "READ HEADER");
   1984		if (reply == 0)
   1985			reply = do_read_header(common, bh);
   1986		break;
   1987
   1988	case READ_TOC:
   1989		if (!common->curlun || !common->curlun->cdrom)
   1990			goto unknown_cmnd;
   1991		common->data_size_from_cmnd =
   1992			get_unaligned_be16(&common->cmnd[7]);
   1993		reply = check_command(common, 10, DATA_DIR_TO_HOST,
   1994				      (0xf<<6) | (3<<1), 1,
   1995				      "READ TOC");
   1996		if (reply == 0)
   1997			reply = do_read_toc(common, bh);
   1998		break;
   1999
   2000	case READ_FORMAT_CAPACITIES:
   2001		common->data_size_from_cmnd =
   2002			get_unaligned_be16(&common->cmnd[7]);
   2003		reply = check_command(common, 10, DATA_DIR_TO_HOST,
   2004				      (3<<7), 1,
   2005				      "READ FORMAT CAPACITIES");
   2006		if (reply == 0)
   2007			reply = do_read_format_capacities(common, bh);
   2008		break;
   2009
   2010	case REQUEST_SENSE:
   2011		common->data_size_from_cmnd = common->cmnd[4];
   2012		reply = check_command(common, 6, DATA_DIR_TO_HOST,
   2013				      (1<<4), 0,
   2014				      "REQUEST SENSE");
   2015		if (reply == 0)
   2016			reply = do_request_sense(common, bh);
   2017		break;
   2018
   2019	case SERVICE_ACTION_IN_16:
   2020		switch (common->cmnd[1] & 0x1f) {
   2021
   2022		case SAI_READ_CAPACITY_16:
   2023			common->data_size_from_cmnd =
   2024				get_unaligned_be32(&common->cmnd[10]);
   2025			reply = check_command(common, 16, DATA_DIR_TO_HOST,
   2026					      (1<<1) | (0xff<<2) | (0xf<<10) |
   2027					      (1<<14), 1,
   2028					      "READ CAPACITY(16)");
   2029			if (reply == 0)
   2030				reply = do_read_capacity_16(common, bh);
   2031			break;
   2032
   2033		default:
   2034			goto unknown_cmnd;
   2035		}
   2036		break;
   2037
   2038	case START_STOP:
   2039		common->data_size_from_cmnd = 0;
   2040		reply = check_command(common, 6, DATA_DIR_NONE,
   2041				      (1<<1) | (1<<4), 0,
   2042				      "START-STOP UNIT");
   2043		if (reply == 0)
   2044			reply = do_start_stop(common);
   2045		break;
   2046
   2047	case SYNCHRONIZE_CACHE:
   2048		common->data_size_from_cmnd = 0;
   2049		reply = check_command(common, 10, DATA_DIR_NONE,
   2050				      (0xf<<2) | (3<<7), 1,
   2051				      "SYNCHRONIZE CACHE");
   2052		if (reply == 0)
   2053			reply = do_synchronize_cache(common);
   2054		break;
   2055
   2056	case TEST_UNIT_READY:
   2057		common->data_size_from_cmnd = 0;
   2058		reply = check_command(common, 6, DATA_DIR_NONE,
   2059				0, 1,
   2060				"TEST UNIT READY");
   2061		break;
   2062
   2063	/*
   2064	 * Although optional, this command is used by MS-Windows.  We
   2065	 * support a minimal version: BytChk must be 0.
   2066	 */
   2067	case VERIFY:
   2068		common->data_size_from_cmnd = 0;
   2069		reply = check_command(common, 10, DATA_DIR_NONE,
   2070				      (1<<1) | (0xf<<2) | (3<<7), 1,
   2071				      "VERIFY");
   2072		if (reply == 0)
   2073			reply = do_verify(common);
   2074		break;
   2075
   2076	case WRITE_6:
   2077		i = common->cmnd[4];
   2078		common->data_size_from_cmnd = (i == 0) ? 256 : i;
   2079		reply = check_command_size_in_blocks(common, 6,
   2080				      DATA_DIR_FROM_HOST,
   2081				      (7<<1) | (1<<4), 1,
   2082				      "WRITE(6)");
   2083		if (reply == 0)
   2084			reply = do_write(common);
   2085		break;
   2086
   2087	case WRITE_10:
   2088		common->data_size_from_cmnd =
   2089				get_unaligned_be16(&common->cmnd[7]);
   2090		reply = check_command_size_in_blocks(common, 10,
   2091				      DATA_DIR_FROM_HOST,
   2092				      (1<<1) | (0xf<<2) | (3<<7), 1,
   2093				      "WRITE(10)");
   2094		if (reply == 0)
   2095			reply = do_write(common);
   2096		break;
   2097
   2098	case WRITE_12:
   2099		common->data_size_from_cmnd =
   2100				get_unaligned_be32(&common->cmnd[6]);
   2101		reply = check_command_size_in_blocks(common, 12,
   2102				      DATA_DIR_FROM_HOST,
   2103				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
   2104				      "WRITE(12)");
   2105		if (reply == 0)
   2106			reply = do_write(common);
   2107		break;
   2108
   2109	case WRITE_16:
   2110		common->data_size_from_cmnd =
   2111				get_unaligned_be32(&common->cmnd[10]);
   2112		reply = check_command_size_in_blocks(common, 16,
   2113				      DATA_DIR_FROM_HOST,
   2114				      (1<<1) | (0xff<<2) | (0xf<<10), 1,
   2115				      "WRITE(16)");
   2116		if (reply == 0)
   2117			reply = do_write(common);
   2118		break;
   2119
   2120	/*
   2121	 * Some mandatory commands that we recognize but don't implement.
   2122	 * They don't mean much in this setting.  It's left as an exercise
   2123	 * for anyone interested to implement RESERVE and RELEASE in terms
   2124	 * of Posix locks.
   2125	 */
   2126	case FORMAT_UNIT:
   2127	case RELEASE:
   2128	case RESERVE:
   2129	case SEND_DIAGNOSTIC:
   2130
   2131	default:
   2132unknown_cmnd:
   2133		common->data_size_from_cmnd = 0;
   2134		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
   2135		reply = check_command(common, common->cmnd_size,
   2136				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
   2137		if (reply == 0) {
   2138			common->curlun->sense_data = SS_INVALID_COMMAND;
   2139			reply = -EINVAL;
   2140		}
   2141		break;
   2142	}
   2143	up_read(&common->filesem);
   2144
   2145	if (reply == -EINTR || signal_pending(current))
   2146		return -EINTR;
   2147
   2148	/* Set up the single reply buffer for finish_reply() */
   2149	if (reply == -EINVAL)
   2150		reply = 0;		/* Error reply length */
   2151	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
   2152		reply = min((u32)reply, common->data_size_from_cmnd);
   2153		bh->inreq->length = reply;
   2154		bh->state = BUF_STATE_FULL;
   2155		common->residue -= reply;
   2156	}				/* Otherwise it's already set */
   2157
   2158	return 0;
   2159}
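
/*
 * At this point "reply" holds what the per-command handler returned.  For
 * the buffer-based handlers (INQUIRY, MODE SENSE, REQUEST SENSE, READ
 * CAPACITY, ...) that is the number of bytes placed in bh->buf; it gets
 * clamped to data_size_from_cmnd and subtracted from the residue so that
 * finish_reply() can send it.  do_read() and do_write() run their own
 * transfer loops, maintain the residue themselves and normally return a
 * negative "no default reply" value here instead.
 */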
   2160
   2161
   2162/*-------------------------------------------------------------------------*/
   2163
   2164static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
   2165{
   2166	struct usb_request	*req = bh->outreq;
   2167	struct bulk_cb_wrap	*cbw = req->buf;
   2168	struct fsg_common	*common = fsg->common;
   2169
   2170	/* Was this a real packet?  Should it be ignored? */
   2171	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
   2172		return -EINVAL;
   2173
   2174	/* Is the CBW valid? */
   2175	if (req->actual != US_BULK_CB_WRAP_LEN ||
   2176			cbw->Signature != cpu_to_le32(
   2177				US_BULK_CB_SIGN)) {
   2178		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
   2179				req->actual,
   2180				le32_to_cpu(cbw->Signature));
   2181
   2182		/*
   2183		 * The Bulk-only spec says we MUST stall the IN endpoint
   2184		 * (6.6.1), so it's unavoidable.  It also says we must
   2185		 * retain this state until the next reset, but there's
   2186		 * no way to tell the controller driver it should ignore
   2187		 * Clear-Feature(HALT) requests.
   2188		 *
   2189		 * We aren't required to halt the OUT endpoint; instead
   2190		 * we can simply accept and discard any data received
   2191		 * until the next reset.
   2192		 */
   2193		wedge_bulk_in_endpoint(fsg);
   2194		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
   2195		return -EINVAL;
   2196	}
   2197
   2198	/* Is the CBW meaningful? */
   2199	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
   2200	    cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
   2201	    cbw->Length > MAX_COMMAND_SIZE) {
   2202		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
   2203				"cmdlen %u\n",
   2204				cbw->Lun, cbw->Flags, cbw->Length);
   2205
   2206		/*
   2207		 * We can do anything we want here, so let's stall the
   2208		 * bulk pipes if we are allowed to.
   2209		 */
   2210		if (common->can_stall) {
   2211			fsg_set_halt(fsg, fsg->bulk_out);
   2212			halt_bulk_in_endpoint(fsg);
   2213		}
   2214		return -EINVAL;
   2215	}
   2216
   2217	/* Save the command for later */
   2218	common->cmnd_size = cbw->Length;
   2219	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
   2220	if (cbw->Flags & US_BULK_FLAG_IN)
   2221		common->data_dir = DATA_DIR_TO_HOST;
   2222	else
   2223		common->data_dir = DATA_DIR_FROM_HOST;
   2224	common->data_size = le32_to_cpu(cbw->DataTransferLength);
   2225	if (common->data_size == 0)
   2226		common->data_dir = DATA_DIR_NONE;
   2227	common->lun = cbw->Lun;
   2228	if (common->lun < ARRAY_SIZE(common->luns))
   2229		common->curlun = common->luns[common->lun];
   2230	else
   2231		common->curlun = NULL;
   2232	common->tag = cbw->Tag;
   2233	return 0;
   2234}
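
/*
 * In short, a CBW is accepted only if it is exactly US_BULK_CB_WRAP_LEN
 * (31) bytes long, carries the 'USBC' signature, addresses a LUN we can
 * index, sets no flag bits other than the direction bit, and declares a
 * CDB length between 1 and MAX_COMMAND_SIZE.  A malformed CBW wedges the
 * bulk-in endpoint; a well-formed but meaningless one just stalls both
 * pipes if stalling is allowed.
 */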
   2235
   2236static int get_next_command(struct fsg_common *common)
   2237{
   2238	struct fsg_buffhd	*bh;
   2239	int			rc = 0;
   2240
   2241	/* Wait for the next buffer to become available */
   2242	bh = common->next_buffhd_to_fill;
   2243	rc = sleep_thread(common, true, bh);
   2244	if (rc)
   2245		return rc;
   2246
   2247	/* Queue a request to read a Bulk-only CBW */
   2248	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
   2249	if (!start_out_transfer(common, bh))
   2250		/* Don't know what to do if common->fsg is NULL */
   2251		return -EIO;
   2252
   2253	/*
   2254	 * We will drain the buffer in software, which means we
   2255	 * can reuse it for the next filling.  No need to advance
   2256	 * next_buffhd_to_fill.
   2257	 */
   2258
   2259	/* Wait for the CBW to arrive */
   2260	rc = sleep_thread(common, true, bh);
   2261	if (rc)
   2262		return rc;
   2263
   2264	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
   2265	bh->state = BUF_STATE_EMPTY;
   2266
   2267	return rc;
   2268}
   2269
   2270
   2271/*-------------------------------------------------------------------------*/
   2272
   2273static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
   2274		struct usb_request **preq)
   2275{
   2276	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
   2277	if (*preq)
   2278		return 0;
   2279	ERROR(common, "can't allocate request for %s\n", ep->name);
   2280	return -ENOMEM;
   2281}
   2282
   2283/* Reset interface setting and re-init endpoint state (toggle etc). */
   2284static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
   2285{
   2286	struct fsg_dev *fsg;
   2287	int i, rc = 0;
   2288
   2289	if (common->running)
   2290		DBG(common, "reset interface\n");
   2291
   2292reset:
   2293	/* Deallocate the requests */
   2294	if (common->fsg) {
   2295		fsg = common->fsg;
   2296
   2297		for (i = 0; i < common->fsg_num_buffers; ++i) {
   2298			struct fsg_buffhd *bh = &common->buffhds[i];
   2299
   2300			if (bh->inreq) {
   2301				usb_ep_free_request(fsg->bulk_in, bh->inreq);
   2302				bh->inreq = NULL;
   2303			}
   2304			if (bh->outreq) {
   2305				usb_ep_free_request(fsg->bulk_out, bh->outreq);
   2306				bh->outreq = NULL;
   2307			}
   2308		}
   2309
   2310		/* Disable the endpoints */
   2311		if (fsg->bulk_in_enabled) {
   2312			usb_ep_disable(fsg->bulk_in);
   2313			fsg->bulk_in_enabled = 0;
   2314		}
   2315		if (fsg->bulk_out_enabled) {
   2316			usb_ep_disable(fsg->bulk_out);
   2317			fsg->bulk_out_enabled = 0;
   2318		}
   2319
   2320		common->fsg = NULL;
   2321		wake_up(&common->fsg_wait);
   2322	}
   2323
   2324	common->running = 0;
   2325	if (!new_fsg || rc)
   2326		return rc;
   2327
   2328	common->fsg = new_fsg;
   2329	fsg = common->fsg;
   2330
   2331	/* Enable the endpoints */
   2332	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
   2333	if (rc)
   2334		goto reset;
   2335	rc = usb_ep_enable(fsg->bulk_in);
   2336	if (rc)
   2337		goto reset;
   2338	fsg->bulk_in->driver_data = common;
   2339	fsg->bulk_in_enabled = 1;
   2340
   2341	rc = config_ep_by_speed(common->gadget, &(fsg->function),
   2342				fsg->bulk_out);
   2343	if (rc)
   2344		goto reset;
   2345	rc = usb_ep_enable(fsg->bulk_out);
   2346	if (rc)
   2347		goto reset;
   2348	fsg->bulk_out->driver_data = common;
   2349	fsg->bulk_out_enabled = 1;
   2350	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
   2351	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
   2352
   2353	/* Allocate the requests */
   2354	for (i = 0; i < common->fsg_num_buffers; ++i) {
   2355		struct fsg_buffhd	*bh = &common->buffhds[i];
   2356
   2357		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
   2358		if (rc)
   2359			goto reset;
   2360		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
   2361		if (rc)
   2362			goto reset;
   2363		bh->inreq->buf = bh->outreq->buf = bh->buf;
   2364		bh->inreq->context = bh->outreq->context = bh;
   2365		bh->inreq->complete = bulk_in_complete;
   2366		bh->outreq->complete = bulk_out_complete;
   2367	}
   2368
   2369	common->running = 1;
   2370	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
   2371		if (common->luns[i])
   2372			common->luns[i]->unit_attention_data =
   2373				SS_RESET_OCCURRED;
   2374	return rc;
   2375}
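
/*
 * Any failure while enabling an endpoint or allocating a request jumps back
 * to the reset: label, which tears everything down again and then returns
 * the error.  On success every configured LUN gets a SS_RESET_OCCURRED unit
 * attention, so the next ordinary SCSI command fails with CHECK CONDITION
 * and the host learns (via REQUEST SENSE) that a reset happened.
 */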
   2376
   2377
   2378/****************************** ALT CONFIGS ******************************/
   2379
   2380static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
   2381{
   2382	struct fsg_dev *fsg = fsg_from_func(f);
   2383
   2384	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
   2385	return USB_GADGET_DELAYED_STATUS;
   2386}
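
/*
 * Returning USB_GADGET_DELAYED_STATUS defers the control status stage: the
 * endpoint reconfiguration itself runs in the worker thread, and
 * handle_exception() calls usb_composite_setup_continue() to complete the
 * deferred request once do_set_interface() has finished.
 */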
   2387
   2388static void fsg_disable(struct usb_function *f)
   2389{
   2390	struct fsg_dev *fsg = fsg_from_func(f);
   2391
   2392	/* Disable the endpoints */
   2393	if (fsg->bulk_in_enabled) {
   2394		usb_ep_disable(fsg->bulk_in);
   2395		fsg->bulk_in_enabled = 0;
   2396	}
   2397	if (fsg->bulk_out_enabled) {
   2398		usb_ep_disable(fsg->bulk_out);
   2399		fsg->bulk_out_enabled = 0;
   2400	}
   2401
   2402	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
   2403}
   2404
   2405
   2406/*-------------------------------------------------------------------------*/
   2407
   2408static void handle_exception(struct fsg_common *common)
   2409{
   2410	int			i;
   2411	struct fsg_buffhd	*bh;
   2412	enum fsg_state		old_state;
   2413	struct fsg_lun		*curlun;
   2414	unsigned int		exception_req_tag;
   2415	struct fsg_dev		*new_fsg;
   2416
   2417	/*
   2418	 * Clear the existing signals.  Anything but SIGUSR1 is converted
   2419	 * into a high-priority EXIT exception.
   2420	 */
   2421	for (;;) {
   2422		int sig = kernel_dequeue_signal();
   2423		if (!sig)
   2424			break;
   2425		if (sig != SIGUSR1) {
   2426			spin_lock_irq(&common->lock);
   2427			if (common->state < FSG_STATE_EXIT)
   2428				DBG(common, "Main thread exiting on signal\n");
   2429			common->state = FSG_STATE_EXIT;
   2430			spin_unlock_irq(&common->lock);
   2431		}
   2432	}
   2433
   2434	/* Cancel all the pending transfers */
   2435	if (likely(common->fsg)) {
   2436		for (i = 0; i < common->fsg_num_buffers; ++i) {
   2437			bh = &common->buffhds[i];
   2438			if (bh->state == BUF_STATE_SENDING)
   2439				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
   2440			if (bh->state == BUF_STATE_RECEIVING)
   2441				usb_ep_dequeue(common->fsg->bulk_out,
   2442					       bh->outreq);
   2443
   2444			/* Wait for a transfer to become idle */
   2445			if (sleep_thread(common, false, bh))
   2446				return;
   2447		}
   2448
   2449		/* Clear out the controller's fifos */
   2450		if (common->fsg->bulk_in_enabled)
   2451			usb_ep_fifo_flush(common->fsg->bulk_in);
   2452		if (common->fsg->bulk_out_enabled)
   2453			usb_ep_fifo_flush(common->fsg->bulk_out);
   2454	}
   2455
   2456	/*
   2457	 * Reset the I/O buffer states and pointers, the SCSI
   2458	 * state, and the exception.  Then invoke the handler.
   2459	 */
   2460	spin_lock_irq(&common->lock);
   2461
   2462	for (i = 0; i < common->fsg_num_buffers; ++i) {
   2463		bh = &common->buffhds[i];
   2464		bh->state = BUF_STATE_EMPTY;
   2465	}
   2466	common->next_buffhd_to_fill = &common->buffhds[0];
   2467	common->next_buffhd_to_drain = &common->buffhds[0];
   2468	exception_req_tag = common->exception_req_tag;
   2469	new_fsg = common->exception_arg;
   2470	old_state = common->state;
   2471	common->state = FSG_STATE_NORMAL;
   2472
   2473	if (old_state != FSG_STATE_ABORT_BULK_OUT) {
   2474		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
   2475			curlun = common->luns[i];
   2476			if (!curlun)
   2477				continue;
   2478			curlun->prevent_medium_removal = 0;
   2479			curlun->sense_data = SS_NO_SENSE;
   2480			curlun->unit_attention_data = SS_NO_SENSE;
   2481			curlun->sense_data_info = 0;
   2482			curlun->info_valid = 0;
   2483		}
   2484	}
   2485	spin_unlock_irq(&common->lock);
   2486
   2487	/* Carry out any extra actions required for the exception */
   2488	switch (old_state) {
   2489	case FSG_STATE_NORMAL:
   2490		break;
   2491
   2492	case FSG_STATE_ABORT_BULK_OUT:
   2493		send_status(common);
   2494		break;
   2495
   2496	case FSG_STATE_PROTOCOL_RESET:
   2497		/*
   2498		 * In case we were forced against our will to halt a
   2499		 * bulk endpoint, clear the halt now.  (The SuperH UDC
   2500		 * requires this.)
   2501		 */
   2502		if (!fsg_is_set(common))
   2503			break;
   2504		if (test_and_clear_bit(IGNORE_BULK_OUT,
   2505				       &common->fsg->atomic_bitflags))
   2506			usb_ep_clear_halt(common->fsg->bulk_in);
   2507
   2508		if (common->ep0_req_tag == exception_req_tag)
   2509			ep0_queue(common);	/* Complete the status stage */
   2510
   2511		/*
   2512		 * Technically this should go here, but it would only be
   2513		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
   2514		 * CONFIG_CHANGE cases.
   2515		 */
    2516		/* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
   2517		/*	if (common->luns[i]) */
   2518		/*		common->luns[i]->unit_attention_data = */
   2519		/*			SS_RESET_OCCURRED;  */
   2520		break;
   2521
   2522	case FSG_STATE_CONFIG_CHANGE:
   2523		do_set_interface(common, new_fsg);
   2524		if (new_fsg)
   2525			usb_composite_setup_continue(common->cdev);
   2526		break;
   2527
   2528	case FSG_STATE_EXIT:
   2529		do_set_interface(common, NULL);		/* Free resources */
   2530		spin_lock_irq(&common->lock);
   2531		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
   2532		spin_unlock_irq(&common->lock);
   2533		break;
   2534
   2535	case FSG_STATE_TERMINATED:
   2536		break;
   2537	}
   2538}
   2539
   2540
   2541/*-------------------------------------------------------------------------*/
   2542
   2543static int fsg_main_thread(void *common_)
   2544{
   2545	struct fsg_common	*common = common_;
   2546	int			i;
   2547
   2548	/*
   2549	 * Allow the thread to be killed by a signal, but set the signal mask
   2550	 * to block everything but INT, TERM, KILL, and USR1.
   2551	 */
   2552	allow_signal(SIGINT);
   2553	allow_signal(SIGTERM);
   2554	allow_signal(SIGKILL);
   2555	allow_signal(SIGUSR1);
   2556
   2557	/* Allow the thread to be frozen */
   2558	set_freezable();
   2559
   2560	/* The main loop */
   2561	while (common->state != FSG_STATE_TERMINATED) {
   2562		if (exception_in_progress(common) || signal_pending(current)) {
   2563			handle_exception(common);
   2564			continue;
   2565		}
   2566
   2567		if (!common->running) {
   2568			sleep_thread(common, true, NULL);
   2569			continue;
   2570		}
   2571
   2572		if (get_next_command(common) || exception_in_progress(common))
   2573			continue;
   2574		if (do_scsi_command(common) || exception_in_progress(common))
   2575			continue;
   2576		if (finish_reply(common) || exception_in_progress(common))
   2577			continue;
   2578		send_status(common);
   2579	}
   2580
   2581	spin_lock_irq(&common->lock);
   2582	common->thread_task = NULL;
   2583	spin_unlock_irq(&common->lock);
   2584
   2585	/* Eject media from all LUNs */
   2586
   2587	down_write(&common->filesem);
   2588	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
   2589		struct fsg_lun *curlun = common->luns[i];
   2590
   2591		if (curlun && fsg_lun_is_open(curlun))
   2592			fsg_lun_close(curlun);
   2593	}
   2594	up_write(&common->filesem);
   2595
   2596	/* Let fsg_unbind() know the thread has exited */
   2597	kthread_complete_and_exit(&common->thread_notifier, 0);
   2598}
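
/*
 * Each pass through the main loop above handles one Bulk-Only command:
 * get_next_command() reads the CBW, do_scsi_command() runs the SCSI handler
 * and stages the data, finish_reply() sends the last data buffer (or drains
 * excess OUT data), and send_status() returns the CSW.  A pending exception
 * short-circuits the sequence at any point and is serviced on the next
 * iteration by handle_exception().
 */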
   2599
   2600
   2601/*************************** DEVICE ATTRIBUTES ***************************/
   2602
   2603static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
   2604{
   2605	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2606
   2607	return fsg_show_ro(curlun, buf);
   2608}
   2609
   2610static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
   2611			  char *buf)
   2612{
   2613	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2614
   2615	return fsg_show_nofua(curlun, buf);
   2616}
   2617
   2618static ssize_t file_show(struct device *dev, struct device_attribute *attr,
   2619			 char *buf)
   2620{
   2621	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2622	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
   2623
   2624	return fsg_show_file(curlun, filesem, buf);
   2625}
   2626
   2627static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
   2628			const char *buf, size_t count)
   2629{
   2630	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2631	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
   2632
   2633	return fsg_store_ro(curlun, filesem, buf, count);
   2634}
   2635
   2636static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
   2637			   const char *buf, size_t count)
   2638{
   2639	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2640
   2641	return fsg_store_nofua(curlun, buf, count);
   2642}
   2643
   2644static ssize_t file_store(struct device *dev, struct device_attribute *attr,
   2645			  const char *buf, size_t count)
   2646{
   2647	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
   2648	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
   2649
   2650	return fsg_store_file(curlun, filesem, buf, count);
   2651}
   2652
   2653static DEVICE_ATTR_RW(nofua);
    2654	/* mode will be set in fsg_lun_dev_is_visible() */
   2655static DEVICE_ATTR(ro, 0, ro_show, ro_store);
   2656static DEVICE_ATTR(file, 0, file_show, file_store);
   2657
   2658/****************************** FSG COMMON ******************************/
   2659
   2660static void fsg_lun_release(struct device *dev)
   2661{
   2662	/* Nothing needs to be done */
   2663}
   2664
   2665static struct fsg_common *fsg_common_setup(struct fsg_common *common)
   2666{
   2667	if (!common) {
   2668		common = kzalloc(sizeof(*common), GFP_KERNEL);
   2669		if (!common)
   2670			return ERR_PTR(-ENOMEM);
   2671		common->free_storage_on_release = 1;
   2672	} else {
   2673		common->free_storage_on_release = 0;
   2674	}
   2675	init_rwsem(&common->filesem);
   2676	spin_lock_init(&common->lock);
   2677	init_completion(&common->thread_notifier);
   2678	init_waitqueue_head(&common->io_wait);
   2679	init_waitqueue_head(&common->fsg_wait);
   2680	common->state = FSG_STATE_TERMINATED;
   2681	memset(common->luns, 0, sizeof(common->luns));
   2682
   2683	return common;
   2684}
   2685
   2686void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
   2687{
   2688	common->sysfs = sysfs;
   2689}
   2690EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
   2691
   2692static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
   2693{
   2694	if (buffhds) {
   2695		struct fsg_buffhd *bh = buffhds;
   2696		while (n--) {
   2697			kfree(bh->buf);
   2698			++bh;
   2699		}
   2700		kfree(buffhds);
   2701	}
   2702}
   2703
   2704int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
   2705{
   2706	struct fsg_buffhd *bh, *buffhds;
   2707	int i;
   2708
   2709	buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
   2710	if (!buffhds)
   2711		return -ENOMEM;
   2712
   2713	/* Data buffers cyclic list */
   2714	bh = buffhds;
   2715	i = n;
   2716	goto buffhds_first_it;
   2717	do {
   2718		bh->next = bh + 1;
   2719		++bh;
   2720buffhds_first_it:
   2721		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
   2722		if (unlikely(!bh->buf))
   2723			goto error_release;
   2724	} while (--i);
   2725	bh->next = buffhds;
   2726
   2727	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
   2728	common->fsg_num_buffers = n;
   2729	common->buffhds = buffhds;
   2730
   2731	return 0;
   2732
   2733error_release:
   2734	/*
    2735	 * The "buf" pointers in the heads after the first n - i entries are
    2736	 * still NULL (kcalloc zeroed them), so freeing them is harmless.
   2737	 */
   2738	_fsg_common_free_buffers(buffhds, n);
   2739
   2740	return -ENOMEM;
   2741}
   2742EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
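
/*
 * The goto into the middle of the loop above builds the buffer heads into a
 * singly linked ring: each bh->next points at the following head and the
 * last head points back at the first, so next_buffhd_to_fill and
 * next_buffhd_to_drain can simply follow ->next forever without any index
 * arithmetic.
 */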
   2743
   2744void fsg_common_remove_lun(struct fsg_lun *lun)
   2745{
   2746	if (device_is_registered(&lun->dev))
   2747		device_unregister(&lun->dev);
   2748	fsg_lun_close(lun);
   2749	kfree(lun);
   2750}
   2751EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
   2752
   2753static void _fsg_common_remove_luns(struct fsg_common *common, int n)
   2754{
   2755	int i;
   2756
   2757	for (i = 0; i < n; ++i)
   2758		if (common->luns[i]) {
   2759			fsg_common_remove_lun(common->luns[i]);
   2760			common->luns[i] = NULL;
   2761		}
   2762}
   2763
   2764void fsg_common_remove_luns(struct fsg_common *common)
   2765{
   2766	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
   2767}
   2768EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
   2769
   2770void fsg_common_free_buffers(struct fsg_common *common)
   2771{
   2772	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
   2773	common->buffhds = NULL;
   2774}
   2775EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
   2776
   2777int fsg_common_set_cdev(struct fsg_common *common,
   2778			 struct usb_composite_dev *cdev, bool can_stall)
   2779{
   2780	struct usb_string *us;
   2781
   2782	common->gadget = cdev->gadget;
   2783	common->ep0 = cdev->gadget->ep0;
   2784	common->ep0req = cdev->req;
   2785	common->cdev = cdev;
   2786
   2787	us = usb_gstrings_attach(cdev, fsg_strings_array,
   2788				 ARRAY_SIZE(fsg_strings));
   2789	if (IS_ERR(us))
   2790		return PTR_ERR(us);
   2791
   2792	fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;
   2793
   2794	/*
   2795	 * Some peripheral controllers are known not to be able to
   2796	 * halt bulk endpoints correctly.  If one of them is present,
   2797	 * disable stalls.
   2798	 */
   2799	common->can_stall = can_stall &&
   2800			gadget_is_stall_supported(common->gadget);
   2801
   2802	return 0;
   2803}
   2804EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
   2805
   2806static struct attribute *fsg_lun_dev_attrs[] = {
   2807	&dev_attr_ro.attr,
   2808	&dev_attr_file.attr,
   2809	&dev_attr_nofua.attr,
   2810	NULL
   2811};
   2812
   2813static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
   2814				      struct attribute *attr, int idx)
   2815{
   2816	struct device *dev = kobj_to_dev(kobj);
   2817	struct fsg_lun *lun = fsg_lun_from_dev(dev);
   2818
   2819	if (attr == &dev_attr_ro.attr)
   2820		return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
   2821	if (attr == &dev_attr_file.attr)
   2822		return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
   2823	return attr->mode;
   2824}
   2825
   2826static const struct attribute_group fsg_lun_dev_group = {
   2827	.attrs = fsg_lun_dev_attrs,
   2828	.is_visible = fsg_lun_dev_is_visible,
   2829};
   2830
   2831static const struct attribute_group *fsg_lun_dev_groups[] = {
   2832	&fsg_lun_dev_group,
   2833	NULL
   2834};
   2835
   2836int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
   2837			  unsigned int id, const char *name,
   2838			  const char **name_pfx)
   2839{
   2840	struct fsg_lun *lun;
   2841	char *pathbuf, *p;
   2842	int rc = -ENOMEM;
   2843
   2844	if (id >= ARRAY_SIZE(common->luns))
   2845		return -ENODEV;
   2846
   2847	if (common->luns[id])
   2848		return -EBUSY;
   2849
   2850	if (!cfg->filename && !cfg->removable) {
   2851		pr_err("no file given for LUN%d\n", id);
   2852		return -EINVAL;
   2853	}
   2854
   2855	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
   2856	if (!lun)
   2857		return -ENOMEM;
   2858
   2859	lun->name_pfx = name_pfx;
   2860
   2861	lun->cdrom = !!cfg->cdrom;
   2862	lun->ro = cfg->cdrom || cfg->ro;
   2863	lun->initially_ro = lun->ro;
   2864	lun->removable = !!cfg->removable;
   2865
   2866	if (!common->sysfs) {
    2867		/* we DON'T own the name! */
   2868		lun->name = name;
   2869	} else {
   2870		lun->dev.release = fsg_lun_release;
   2871		lun->dev.parent = &common->gadget->dev;
   2872		lun->dev.groups = fsg_lun_dev_groups;
   2873		dev_set_drvdata(&lun->dev, &common->filesem);
   2874		dev_set_name(&lun->dev, "%s", name);
   2875		lun->name = dev_name(&lun->dev);
   2876
   2877		rc = device_register(&lun->dev);
   2878		if (rc) {
   2879			pr_info("failed to register LUN%d: %d\n", id, rc);
   2880			put_device(&lun->dev);
   2881			goto error_sysfs;
   2882		}
   2883	}
   2884
   2885	common->luns[id] = lun;
   2886
   2887	if (cfg->filename) {
   2888		rc = fsg_lun_open(lun, cfg->filename);
   2889		if (rc)
   2890			goto error_lun;
   2891	}
   2892
   2893	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
   2894	p = "(no medium)";
   2895	if (fsg_lun_is_open(lun)) {
   2896		p = "(error)";
   2897		if (pathbuf) {
   2898			p = file_path(lun->filp, pathbuf, PATH_MAX);
   2899			if (IS_ERR(p))
   2900				p = "(error)";
   2901		}
   2902	}
   2903	pr_info("LUN: %s%s%sfile: %s\n",
   2904	      lun->removable ? "removable " : "",
   2905	      lun->ro ? "read only " : "",
   2906	      lun->cdrom ? "CD-ROM " : "",
   2907	      p);
   2908	kfree(pathbuf);
   2909
   2910	return 0;
   2911
   2912error_lun:
   2913	if (device_is_registered(&lun->dev))
   2914		device_unregister(&lun->dev);
   2915	fsg_lun_close(lun);
   2916	common->luns[id] = NULL;
   2917error_sysfs:
   2918	kfree(lun);
   2919	return rc;
   2920}
   2921EXPORT_SYMBOL_GPL(fsg_common_create_lun);
   2922
   2923int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
   2924{
    2925	char buf[8]; /* "lun" plus up to 4 decimal digits and a NUL */
   2926	int i, rc;
   2927
   2928	fsg_common_remove_luns(common);
   2929
   2930	for (i = 0; i < cfg->nluns; ++i) {
   2931		snprintf(buf, sizeof(buf), "lun%d", i);
   2932		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
   2933		if (rc)
   2934			goto fail;
   2935	}
   2936
   2937	pr_info("Number of LUNs=%d\n", cfg->nluns);
   2938
   2939	return 0;
   2940
   2941fail:
   2942	_fsg_common_remove_luns(common, i);
   2943	return rc;
   2944}
   2945EXPORT_SYMBOL_GPL(fsg_common_create_luns);
   2946
   2947void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
   2948				   const char *pn)
   2949{
   2950	int i;
   2951
    2952	/* Prepare common->inquiry_string */
   2953	i = get_default_bcdDevice();
   2954	snprintf(common->inquiry_string, sizeof(common->inquiry_string),
   2955		 "%-8s%-16s%04x", vn ?: "Linux",
   2956		 /* Assume product name dependent on the first LUN */
   2957		 pn ?: ((*common->luns)->cdrom
   2958		     ? "File-CD Gadget"
   2959		     : "File-Stor Gadget"),
   2960		 i);
   2961}
   2962EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
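
/*
 * Taken together, the exported helpers above let a caller assemble a
 * struct fsg_common by hand.  One plausible order (an illustrative sketch
 * only; "num_buffers" and "cfg" stand for the caller's own settings, and
 * the common structure itself comes from fsg_common_setup() or from a
 * caller-provided allocation):
 *
 *	fsg_common_set_num_buffers(common, num_buffers);
 *	fsg_common_set_sysfs(common, true);
 *	fsg_common_set_cdev(common, cdev, true);
 *	fsg_common_create_luns(common, &cfg);
 *	fsg_common_set_inquiry_string(common, cfg.vendor_name,
 *				      cfg.product_name);
 *
 * fsg_common_release() below tears all of this down again.
 */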
   2963
   2964static void fsg_common_release(struct fsg_common *common)
   2965{
   2966	int i;
   2967
   2968	/* If the thread isn't already dead, tell it to exit now */
   2969	if (common->state != FSG_STATE_TERMINATED) {
   2970		raise_exception(common, FSG_STATE_EXIT);
   2971		wait_for_completion(&common->thread_notifier);
   2972	}
   2973
   2974	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
   2975		struct fsg_lun *lun = common->luns[i];
   2976		if (!lun)
   2977			continue;
   2978		fsg_lun_close(lun);
   2979		if (device_is_registered(&lun->dev))
   2980			device_unregister(&lun->dev);
   2981		kfree(lun);
   2982	}
   2983
   2984	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
   2985	if (common->free_storage_on_release)
   2986		kfree(common);
   2987}
   2988
   2989
   2990/*-------------------------------------------------------------------------*/
   2991
   2992static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
   2993{
   2994	struct fsg_dev		*fsg = fsg_from_func(f);
   2995	struct fsg_common	*common = fsg->common;
   2996	struct usb_gadget	*gadget = c->cdev->gadget;
   2997	int			i;
   2998	struct usb_ep		*ep;
   2999	unsigned		max_burst;
   3000	int			ret;
   3001	struct fsg_opts		*opts;
   3002
    3003	/* Don't allow binding if we don't have at least one LUN */
   3004	ret = _fsg_common_get_max_lun(common);
   3005	if (ret < 0) {
   3006		pr_err("There should be at least one LUN.\n");
   3007		return -EINVAL;
   3008	}
   3009
   3010	opts = fsg_opts_from_func_inst(f->fi);
   3011	if (!opts->no_configfs) {
   3012		ret = fsg_common_set_cdev(fsg->common, c->cdev,
   3013					  fsg->common->can_stall);
   3014		if (ret)
   3015			return ret;
   3016		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
   3017	}
   3018
   3019	if (!common->thread_task) {
   3020		common->state = FSG_STATE_NORMAL;
   3021		common->thread_task =
   3022			kthread_create(fsg_main_thread, common, "file-storage");
   3023		if (IS_ERR(common->thread_task)) {
   3024			ret = PTR_ERR(common->thread_task);
   3025			common->thread_task = NULL;
   3026			common->state = FSG_STATE_TERMINATED;
   3027			return ret;
   3028		}
   3029		DBG(common, "I/O thread pid: %d\n",
   3030		    task_pid_nr(common->thread_task));
   3031		wake_up_process(common->thread_task);
   3032	}
   3033
   3034	fsg->gadget = gadget;
   3035
   3036	/* New interface */
   3037	i = usb_interface_id(c, f);
   3038	if (i < 0)
   3039		goto fail;
   3040	fsg_intf_desc.bInterfaceNumber = i;
   3041	fsg->interface_number = i;
   3042
   3043	/* Find all the endpoints we will use */
   3044	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
   3045	if (!ep)
   3046		goto autoconf_fail;
   3047	fsg->bulk_in = ep;
   3048
   3049	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
   3050	if (!ep)
   3051		goto autoconf_fail;
   3052	fsg->bulk_out = ep;
   3053
   3054	/* Assume endpoint addresses are the same for both speeds */
   3055	fsg_hs_bulk_in_desc.bEndpointAddress =
   3056		fsg_fs_bulk_in_desc.bEndpointAddress;
   3057	fsg_hs_bulk_out_desc.bEndpointAddress =
   3058		fsg_fs_bulk_out_desc.bEndpointAddress;
   3059
    3060	/* Calculate bMaxBurst; we know the packet size is 1024 */
   3061	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
   3062
   3063	fsg_ss_bulk_in_desc.bEndpointAddress =
   3064		fsg_fs_bulk_in_desc.bEndpointAddress;
   3065	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
   3066
   3067	fsg_ss_bulk_out_desc.bEndpointAddress =
   3068		fsg_fs_bulk_out_desc.bEndpointAddress;
   3069	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
   3070
   3071	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
   3072			fsg_ss_function, fsg_ss_function);
   3073	if (ret)
   3074		goto autoconf_fail;
   3075
   3076	return 0;
   3077
   3078autoconf_fail:
   3079	ERROR(fsg, "unable to autoconfigure all endpoints\n");
   3080	i = -ENOTSUPP;
   3081fail:
   3082	/* terminate the thread */
   3083	if (fsg->common->state != FSG_STATE_TERMINATED) {
   3084		raise_exception(fsg->common, FSG_STATE_EXIT);
   3085		wait_for_completion(&fsg->common->thread_notifier);
   3086	}
   3087	return i;
   3088}
   3089
   3090/****************************** ALLOCATE FUNCTION *************************/
   3091
   3092static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
   3093{
   3094	struct fsg_dev		*fsg = fsg_from_func(f);
   3095	struct fsg_common	*common = fsg->common;
   3096
   3097	DBG(fsg, "unbind\n");
   3098	if (fsg->common->fsg == fsg) {
   3099		__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
   3100		/* FIXME: make interruptible or killable somehow? */
   3101		wait_event(common->fsg_wait, common->fsg != fsg);
   3102	}
   3103
   3104	usb_free_all_descriptors(&fsg->function);
   3105}
   3106
   3107static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
   3108{
   3109	return container_of(to_config_group(item), struct fsg_lun_opts, group);
   3110}
   3111
   3112static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
   3113{
   3114	return container_of(to_config_group(item), struct fsg_opts,
   3115			    func_inst.group);
   3116}
   3117
   3118static void fsg_lun_attr_release(struct config_item *item)
   3119{
   3120	struct fsg_lun_opts *lun_opts;
   3121
   3122	lun_opts = to_fsg_lun_opts(item);
   3123	kfree(lun_opts);
   3124}
   3125
   3126static struct configfs_item_operations fsg_lun_item_ops = {
   3127	.release		= fsg_lun_attr_release,
   3128};
   3129
   3130static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
   3131{
   3132	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
   3133	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
   3134
   3135	return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
   3136}
   3137
   3138static ssize_t fsg_lun_opts_file_store(struct config_item *item,
   3139				       const char *page, size_t len)
   3140{
   3141	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
   3142	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
   3143
   3144	return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
   3145}
   3146
   3147CONFIGFS_ATTR(fsg_lun_opts_, file);
   3148
   3149static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
   3150{
   3151	return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
   3152}
   3153
   3154static ssize_t fsg_lun_opts_ro_store(struct config_item *item,
   3155				       const char *page, size_t len)
   3156{
   3157	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
   3158	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
   3159
   3160	return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
   3161}
   3162
   3163CONFIGFS_ATTR(fsg_lun_opts_, ro);
   3164
   3165static ssize_t fsg_lun_opts_removable_show(struct config_item *item,
   3166					   char *page)
   3167{
   3168	return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
   3169}
   3170
   3171static ssize_t fsg_lun_opts_removable_store(struct config_item *item,
   3172				       const char *page, size_t len)
   3173{
   3174	return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
   3175}
   3176
   3177CONFIGFS_ATTR(fsg_lun_opts_, removable);
   3178
   3179static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
   3180{
   3181	return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
   3182}
   3183
   3184static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item,
   3185				       const char *page, size_t len)
   3186{
   3187	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
   3188	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
   3189
   3190	return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
   3191			       len);
   3192}
   3193
   3194CONFIGFS_ATTR(fsg_lun_opts_, cdrom);
   3195
   3196static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
   3197{
   3198	return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
   3199}
   3200
   3201static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
   3202				       const char *page, size_t len)
   3203{
   3204	return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
   3205}
   3206
   3207CONFIGFS_ATTR(fsg_lun_opts_, nofua);
   3208
   3209static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
   3210						char *page)
   3211{
   3212	return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
   3213}
   3214
   3215static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
   3216						 const char *page, size_t len)
   3217{
   3218	return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
   3219}
   3220
   3221CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
   3222
   3223static struct configfs_attribute *fsg_lun_attrs[] = {
   3224	&fsg_lun_opts_attr_file,
   3225	&fsg_lun_opts_attr_ro,
   3226	&fsg_lun_opts_attr_removable,
   3227	&fsg_lun_opts_attr_cdrom,
   3228	&fsg_lun_opts_attr_nofua,
   3229	&fsg_lun_opts_attr_inquiry_string,
   3230	NULL,
   3231};
   3232
   3233static const struct config_item_type fsg_lun_type = {
   3234	.ct_item_ops	= &fsg_lun_item_ops,
   3235	.ct_attrs	= fsg_lun_attrs,
   3236	.ct_owner	= THIS_MODULE,
   3237};
   3238
   3239static struct config_group *fsg_lun_make(struct config_group *group,
   3240					 const char *name)
   3241{
   3242	struct fsg_lun_opts *opts;
   3243	struct fsg_opts *fsg_opts;
   3244	struct fsg_lun_config config;
   3245	char *num_str;
   3246	u8 num;
   3247	int ret;
   3248
   3249	num_str = strchr(name, '.');
   3250	if (!num_str) {
   3251		pr_err("Unable to locate . in LUN.NUMBER\n");
   3252		return ERR_PTR(-EINVAL);
   3253	}
   3254	num_str++;
   3255
   3256	ret = kstrtou8(num_str, 0, &num);
   3257	if (ret)
   3258		return ERR_PTR(ret);
   3259
   3260	fsg_opts = to_fsg_opts(&group->cg_item);
   3261	if (num >= FSG_MAX_LUNS)
   3262		return ERR_PTR(-ERANGE);
   3263	num = array_index_nospec(num, FSG_MAX_LUNS);
   3264
   3265	mutex_lock(&fsg_opts->lock);
   3266	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
   3267		ret = -EBUSY;
   3268		goto out;
   3269	}
   3270
   3271	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
   3272	if (!opts) {
   3273		ret = -ENOMEM;
   3274		goto out;
   3275	}
   3276
   3277	memset(&config, 0, sizeof(config));
   3278	config.removable = true;
   3279
   3280	ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
   3281				    (const char **)&group->cg_item.ci_name);
   3282	if (ret) {
   3283		kfree(opts);
   3284		goto out;
   3285	}
   3286	opts->lun = fsg_opts->common->luns[num];
   3287	opts->lun_id = num;
   3288	mutex_unlock(&fsg_opts->lock);
   3289
   3290	config_group_init_type_name(&opts->group, name, &fsg_lun_type);
   3291
   3292	return &opts->group;
   3293out:
   3294	mutex_unlock(&fsg_opts->lock);
   3295	return ERR_PTR(ret);
   3296}
   3297
   3298static void fsg_lun_drop(struct config_group *group, struct config_item *item)
   3299{
   3300	struct fsg_lun_opts *lun_opts;
   3301	struct fsg_opts *fsg_opts;
   3302
   3303	lun_opts = to_fsg_lun_opts(item);
   3304	fsg_opts = to_fsg_opts(&group->cg_item);
   3305
   3306	mutex_lock(&fsg_opts->lock);
   3307	if (fsg_opts->refcnt) {
   3308		struct config_item *gadget;
   3309
   3310		gadget = group->cg_item.ci_parent->ci_parent;
   3311		unregister_gadget_item(gadget);
   3312	}
   3313
   3314	fsg_common_remove_lun(lun_opts->lun);
   3315	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
   3316	lun_opts->lun_id = 0;
   3317	mutex_unlock(&fsg_opts->lock);
   3318
   3319	config_item_put(item);
   3320}
   3321
   3322static void fsg_attr_release(struct config_item *item)
   3323{
   3324	struct fsg_opts *opts = to_fsg_opts(item);
   3325
   3326	usb_put_function_instance(&opts->func_inst);
   3327}
   3328
   3329static struct configfs_item_operations fsg_item_ops = {
   3330	.release		= fsg_attr_release,
   3331};
   3332
   3333static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
   3334{
   3335	struct fsg_opts *opts = to_fsg_opts(item);
   3336	int result;
   3337
   3338	mutex_lock(&opts->lock);
   3339	result = sprintf(page, "%d", opts->common->can_stall);
   3340	mutex_unlock(&opts->lock);
   3341
   3342	return result;
   3343}
   3344
   3345static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
   3346				    size_t len)
   3347{
   3348	struct fsg_opts *opts = to_fsg_opts(item);
   3349	int ret;
   3350	bool stall;
   3351
   3352	mutex_lock(&opts->lock);
   3353
   3354	if (opts->refcnt) {
   3355		mutex_unlock(&opts->lock);
   3356		return -EBUSY;
   3357	}
   3358
   3359	ret = strtobool(page, &stall);
   3360	if (!ret) {
   3361		opts->common->can_stall = stall;
   3362		ret = len;
   3363	}
   3364
   3365	mutex_unlock(&opts->lock);
   3366
   3367	return ret;
   3368}
   3369
   3370CONFIGFS_ATTR(fsg_opts_, stall);
   3371
   3372#ifdef CONFIG_USB_GADGET_DEBUG_FILES
   3373static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
   3374{
   3375	struct fsg_opts *opts = to_fsg_opts(item);
   3376	int result;
   3377
   3378	mutex_lock(&opts->lock);
   3379	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
   3380	mutex_unlock(&opts->lock);
   3381
   3382	return result;
   3383}
   3384
   3385static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
   3386					  const char *page, size_t len)
   3387{
   3388	struct fsg_opts *opts = to_fsg_opts(item);
   3389	int ret;
   3390	u8 num;
   3391
   3392	mutex_lock(&opts->lock);
   3393	if (opts->refcnt) {
   3394		ret = -EBUSY;
   3395		goto end;
   3396	}
   3397	ret = kstrtou8(page, 0, &num);
   3398	if (ret)
   3399		goto end;
   3400
   3401	ret = fsg_common_set_num_buffers(opts->common, num);
   3402	if (ret)
   3403		goto end;
   3404	ret = len;
   3405
   3406end:
   3407	mutex_unlock(&opts->lock);
   3408	return ret;
   3409}
   3410
   3411CONFIGFS_ATTR(fsg_opts_, num_buffers);
   3412#endif
   3413
   3414static struct configfs_attribute *fsg_attrs[] = {
   3415	&fsg_opts_attr_stall,
   3416#ifdef CONFIG_USB_GADGET_DEBUG_FILES
   3417	&fsg_opts_attr_num_buffers,
   3418#endif
   3419	NULL,
   3420};
   3421
   3422static struct configfs_group_operations fsg_group_ops = {
   3423	.make_group	= fsg_lun_make,
   3424	.drop_item	= fsg_lun_drop,
   3425};
   3426
   3427static const struct config_item_type fsg_func_type = {
   3428	.ct_item_ops	= &fsg_item_ops,
   3429	.ct_group_ops	= &fsg_group_ops,
   3430	.ct_attrs	= fsg_attrs,
   3431	.ct_owner	= THIS_MODULE,
   3432};
   3433
   3434static void fsg_free_inst(struct usb_function_instance *fi)
   3435{
   3436	struct fsg_opts *opts;
   3437
   3438	opts = fsg_opts_from_func_inst(fi);
   3439	fsg_common_release(opts->common);
   3440	kfree(opts);
   3441}
   3442
   3443static struct usb_function_instance *fsg_alloc_inst(void)
   3444{
   3445	struct fsg_opts *opts;
   3446	struct fsg_lun_config config;
   3447	int rc;
   3448
   3449	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
   3450	if (!opts)
   3451		return ERR_PTR(-ENOMEM);
   3452	mutex_init(&opts->lock);
   3453	opts->func_inst.free_func_inst = fsg_free_inst;
   3454	opts->common = fsg_common_setup(opts->common);
   3455	if (IS_ERR(opts->common)) {
   3456		rc = PTR_ERR(opts->common);
   3457		goto release_opts;
   3458	}
   3459
   3460	rc = fsg_common_set_num_buffers(opts->common,
   3461					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
   3462	if (rc)
   3463		goto release_common;
   3464
   3465	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
   3466
   3467	memset(&config, 0, sizeof(config));
   3468	config.removable = true;
   3469	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
   3470			(const char **)&opts->func_inst.group.cg_item.ci_name);
   3471	if (rc)
   3472		goto release_buffers;
   3473
   3474	opts->lun0.lun = opts->common->luns[0];
   3475	opts->lun0.lun_id = 0;
   3476
   3477	config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
   3478
   3479	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
   3480	configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);
   3481
   3482	return &opts->func_inst;
   3483
   3484release_buffers:
   3485	fsg_common_free_buffers(opts->common);
   3486release_common:
   3487	kfree(opts->common);
   3488release_opts:
   3489	kfree(opts);
   3490	return ERR_PTR(rc);
   3491}
   3492
   3493static void fsg_free(struct usb_function *f)
   3494{
   3495	struct fsg_dev *fsg;
   3496	struct fsg_opts *opts;
   3497
   3498	fsg = container_of(f, struct fsg_dev, function);
   3499	opts = container_of(f->fi, struct fsg_opts, func_inst);
   3500
   3501	mutex_lock(&opts->lock);
   3502	opts->refcnt--;
   3503	mutex_unlock(&opts->lock);
   3504
   3505	kfree(fsg);
   3506}
   3507
   3508static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
   3509{
   3510	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
   3511	struct fsg_common *common = opts->common;
   3512	struct fsg_dev *fsg;
   3513
   3514	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
   3515	if (unlikely(!fsg))
   3516		return ERR_PTR(-ENOMEM);
   3517
   3518	mutex_lock(&opts->lock);
   3519	opts->refcnt++;
   3520	mutex_unlock(&opts->lock);
   3521
   3522	fsg->function.name	= FSG_DRIVER_DESC;
   3523	fsg->function.bind	= fsg_bind;
   3524	fsg->function.unbind	= fsg_unbind;
   3525	fsg->function.setup	= fsg_setup;
   3526	fsg->function.set_alt	= fsg_set_alt;
   3527	fsg->function.disable	= fsg_disable;
   3528	fsg->function.free_func	= fsg_free;
   3529
   3530	fsg->common               = common;
   3531
   3532	return &fsg->function;
   3533}
   3534
   3535DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
   3536MODULE_LICENSE("GPL");
   3537MODULE_AUTHOR("Michal Nazarewicz");
   3538
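#if 0
/*
 * Editor's sketch, not part of this driver: how a composite gadget driver
 * could pick up the "mass_storage" function registered by
 * DECLARE_USB_FUNCTION_INIT() above.  The usb_configuration "c" is assumed
 * to come from the caller; example_add_msg() is a placeholder name.
 */
static int example_add_msg(struct usb_configuration *c)
{
	struct usb_function_instance *fi;
	struct usb_function *f;
	int ret;

	fi = usb_get_function_instance("mass_storage");	/* -> fsg_alloc_inst() */
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	f = usb_get_function(fi);			/* -> fsg_alloc() */
	if (IS_ERR(f)) {
		usb_put_function_instance(fi);
		return PTR_ERR(f);
	}

	ret = usb_add_function(c, f);			/* bound later via fsg_bind() */
	if (ret) {
		usb_put_function(f);
		usb_put_function_instance(fi);
	}
	return ret;
}
#endif
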
   3539/************************* Module parameters *************************/
   3540
   3541
   3542void fsg_config_from_params(struct fsg_config *cfg,
   3543		       const struct fsg_module_parameters *params,
   3544		       unsigned int fsg_num_buffers)
   3545{
   3546	struct fsg_lun_config *lun;
   3547	unsigned i;
   3548
   3549	/* Configure LUNs */
   3550	cfg->nluns =
   3551		min(params->luns ?: (params->file_count ?: 1u),
   3552		    (unsigned)FSG_MAX_LUNS);
   3553	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
   3554		lun->ro = !!params->ro[i];
   3555		lun->cdrom = !!params->cdrom[i];
   3556		lun->removable = !!params->removable[i];
   3557		lun->filename =
   3558			params->file_count > i && params->file[i][0]
   3559			? params->file[i]
   3560			: NULL;
   3561	}
   3562
   3563	/* Let MSF use defaults */
   3564	cfg->vendor_name = NULL;
   3565	cfg->product_name = NULL;
   3566
   3567	cfg->ops = NULL;
   3568	cfg->private_data = NULL;
   3569
   3570	/* Finalise */
   3571	cfg->can_stall = params->stall;
   3572	cfg->fsg_num_buffers = fsg_num_buffers;
   3573}
   3574EXPORT_SYMBOL_GPL(fsg_config_from_params);
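
#if 0
/*
 * Editor's sketch, not part of this file: the typical consumer of
 * fsg_config_from_params() is a legacy gadget (e.g. g_mass_storage) that
 * exposes struct fsg_module_parameters as module parameters and converts
 * them into an fsg_config at probe time.  "mod_data" and
 * example_build_config() are placeholder names for this illustration.
 */
static struct fsg_module_parameters mod_data = {
	.stall = 1
};
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);

static void example_build_config(struct fsg_config *cfg)
{
	/* Fill cfg->luns[], nluns, can_stall, etc. from the module parameters */
	fsg_config_from_params(cfg, &mod_data,
			       CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
}
#endif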