cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

urb.c (34906B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Released under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->urb_list);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
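
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * usb_init_urb() is only needed when the driver provides the urb storage
 * itself, e.g. embedded in a hypothetical device struct, instead of
 * obtaining it from usb_alloc_urb().
 */
#if 0	/* example only, never compiled */
struct my_device {		/* hypothetical driver state */
	struct urb int_urb;	/* embedded; must never be fed to kfree()
				 * via usb_free_urb() */
};

static void my_device_init(struct my_device *mydev)
{
	/* mandatory before first use, since usb_alloc_urb() was bypassed */
	usb_init_urb(&mydev->int_urb);
}
#endif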

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
		      mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
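
/*
 * Illustrative sketch (editor's addition): the usual dynamic URB lifecycle.
 * Zero iso packets is correct for control, bulk and interrupt transfers;
 * the matching usb_free_urb() drops the driver's reference.
 */
#if 0	/* example only, never compiled */
static int my_urb_lifecycle(void)
{
	struct urb *urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);	/* no iso descriptors */
	if (!urb)
		return -ENOMEM;

	/* ... fill and submit the urb here ... */

	usb_free_urb(urb);	/* freed once the last reference is gone */
	return 0;
}
#endif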

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without the driver having to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
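
/*
 * Illustrative sketch (editor's addition): the canonical anchored-submit
 * pattern. The anchor is assumed to have been set up once with
 * init_usb_anchor(), e.g. at probe time.
 */
#if 0	/* example only, never compiled */
static int my_submit_anchored(struct usb_anchor *anchor, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, anchor);	/* anchor takes its own reference */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* submission failed: undo */
	return ret;
}
#endif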

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

static const int pipetypes[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/**
 * usb_pipe_type_check - sanity check of a specific pipe for a usb device
 * @dev: struct usb_device to be checked
 * @pipe: pipe to check
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given usb device.  It returns 0 if the pipe is valid for the specific usb
 * device, otherwise a negative error code.
 */
int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
	const struct usb_host_endpoint *ep;

	ep = usb_pipe_endpoint(dev, pipe);
	if (!ep)
		return -EINVAL;
	if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(usb_pipe_type_check);
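
/*
 * Illustrative sketch (editor's addition): a driver might validate a pipe
 * built from a descriptor-supplied endpoint number before relying on it,
 * rejecting devices with malformed descriptors at probe time.
 */
#if 0	/* example only, never compiled */
static int my_validate_bulk_out(struct usb_device *udev, u8 epnum)
{
	unsigned int pipe = usb_sndbulkpipe(udev, epnum);

	/* 0 if the endpoint exists and really is a bulk endpoint */
	return usb_pipe_type_check(udev, pipe);
}
#endif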

/**
 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
 * @urb: urb to be checked
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given urb.  It returns 0 if the urb contains a valid endpoint, otherwise
 * a negative error code.
 */
int usb_urb_ep_type_check(const struct urb *urb)
{
	return usb_pipe_type_check(urb->dev, urb->pipe);
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB.  When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request.  The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future.  Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added.  If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule.  If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past.  When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int				xfertype, max;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;
	unsigned int			allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
		dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out),
				"BOGUS control dir, pipe %x doesn't match bRequestType %x\n",
				urb->pipe, setup->bRequestType);
		if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) {
			dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n",
					le16_to_cpu(setup->wLength),
					urb->transfer_buffer_length);
			return -EBADR;
		}
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int	n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		if (dev->speed == USB_SPEED_SUPER_PLUS &&
		    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
			struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;

			isoc_ep_comp = &ep->ssp_isoc_ep_comp;
			max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH)
			max *= usb_endpoint_maxp_mult(&ep->desc);

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipe_type_check(urb->dev, urb->pipe))
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
			fallthrough;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
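
/*
 * Illustrative sketch (editor's addition): filling and submitting a bulk-in
 * URB. Per the mem_flags rules above, the initial submission from process
 * context uses GFP_KERNEL while the resubmission inside the completion
 * handler uses GFP_ATOMIC (rule (a)). The endpoint address 0x81 and all
 * names here are hypothetical.
 */
#if 0	/* example only, never compiled */
static void my_read_complete(struct urb *urb)
{
	if (urb->status) {
		/* -ENOENT/-ECONNRESET/-ESHUTDOWN: unlinked, do not resubmit */
		return;
	}

	/* consume urb->actual_length bytes from urb->transfer_buffer ... */

	usb_submit_urb(urb, GFP_ATOMIC);	/* keep the queue busy */
}

static int my_start_read(struct usb_device *udev, struct urb *urb,
			 void *buf, int len, void *context)
{
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 0x81),
			  buf, len, my_read_complete, context);
	return usb_submit_urb(urb, GFP_KERNEL);
}
#endif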

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
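
/*
 * Illustrative sketch (editor's addition): asynchronous cancellation from
 * atomic context (e.g. a timer), where the sleeping usb_kill_urb() is not
 * allowed. The completion handler later runs with urb->status == -ECONNRESET.
 */
#if 0	/* example only, never compiled */
static void my_transfer_timeout(struct urb *urb)
{
	/* -EINPROGRESS from usb_unlink_urb() means the unlink is underway */
	usb_unlink_urb(urb);
}
#endif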

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
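
/*
 * Illustrative sketch (editor's addition): synchronous cancellation in a
 * disconnect() callback, which runs in process context and may sleep.
 * The my_device layout is hypothetical.
 */
#if 0	/* example only, never compiled */
static void my_disconnect(struct usb_interface *intf)
{
	struct my_device *mydev = usb_get_intfdata(intf);

	usb_kill_urb(mydev->read_urb);	/* completion done when this returns */
	usb_free_urb(mydev->read_urb);
	kfree(mydev);
}
#endif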

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
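
/*
 * Illustrative sketch (editor's addition): poisoning across suspend so that
 * a self-resubmitting completion handler cannot restart I/O, then
 * unpoisoning on resume. GFP_NOIO follows the mem_flags rules above for
 * storage/suspend paths; the my_device layout is hypothetical.
 */
#if 0	/* example only, never compiled */
static int my_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct my_device *mydev = usb_get_intfdata(intf);

	usb_poison_urb(mydev->read_urb);	/* idle and blocked on return */
	return 0;
}

static int my_resume(struct usb_interface *intf)
{
	struct my_device *mydev = usb_get_intfdata(intf);

	usb_unpoison_urb(mydev->read_urb);
	return usb_submit_urb(mydev->read_urb, GFP_NOIO);
}
#endif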

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - kill all URBs associated with an anchor
 * @anchor: anchor the requests are bound to
 *
 * This kills all outstanding URBs starting from the back of the queue,
 * with the guarantee that no completion callbacks will take place from
 * the anchor after this function returns.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_kill_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
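
/*
 * Illustrative sketch (editor's addition): with every in-flight urb anchored
 * at submission time, disconnect() can cancel them all with a single call
 * rather than tracking each urb by hand. The anchor field is hypothetical.
 */
#if 0	/* example only, never compiled */
static void my_stop_all_io(struct my_device *mydev)
{
	/* no completion handler from this anchor will run afterwards */
	usb_kill_anchored_urbs(&mydev->submitted);
}
#endif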

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		anchor->poisoned = 1;
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_poison_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure that all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
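
/*
 * Illustrative sketch (editor's addition): a graceful drain with a deadline,
 * falling back to forced cancellation when outstanding URBs do not complete
 * within one second. The anchor field is hypothetical.
 */
#if 0	/* example only, never compiled */
static void my_drain_io(struct my_device *mydev)
{
	if (!usb_wait_anchor_empty_timeout(&mydev->submitted, 1000))
		usb_kill_anchored_urbs(&mydev->submitted);	/* timed out */
}
#endif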

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor it, and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);
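
/*
 * Illustrative sketch (editor's addition): because each urb is returned
 * with an extra reference, a drain loop must drop it with usb_put_urb().
 * Moving urbs between two hypothetical anchors looks like this.
 */
#if 0	/* example only, never compiled */
static void my_move_urbs(struct usb_anchor *src, struct usb_anchor *dst)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(src)) != NULL) {
		usb_anchor_urb(urb, dst);
		usb_put_urb(urb);	/* drop the reference from src */
	}
}
#endif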

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all of an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;
	int surely_empty;

	do {
		spin_lock_irqsave(&anchor->lock, flags);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			__usb_unanchor_urb(victim, anchor);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irqrestore(&anchor->lock, flags);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);