cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xhci.c (165708B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * xHCI host controller driver
      4 *
      5 * Copyright (C) 2008 Intel Corp.
      6 *
      7 * Author: Sarah Sharp
      8 * Some code borrowed from the Linux EHCI driver.
      9 */
     10
     11#include <linux/pci.h>
     12#include <linux/iopoll.h>
     13#include <linux/irq.h>
     14#include <linux/log2.h>
     15#include <linux/module.h>
     16#include <linux/moduleparam.h>
     17#include <linux/slab.h>
     18#include <linux/dmi.h>
     19#include <linux/dma-mapping.h>
     20
     21#include "xhci.h"
     22#include "xhci-trace.h"
     23#include "xhci-debugfs.h"
     24#include "xhci-dbgcap.h"
     25
     26#define DRIVER_AUTHOR "Sarah Sharp"
     27#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
     28
     29#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
     30
     31/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
     32static int link_quirk;
     33module_param(link_quirk, int, S_IRUGO | S_IWUSR);
     34MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
     35
     36static unsigned long long quirks;
     37module_param(quirks, ullong, S_IRUGO);
     38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
     39
     40static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
     41{
     42	struct xhci_segment *seg = ring->first_seg;
     43
     44	if (!td || !td->start_seg)
     45		return false;
     46	do {
     47		if (seg == td->start_seg)
     48			return true;
     49		seg = seg->next;
     50	} while (seg && seg != ring->first_seg);
     51
     52	return false;
     53}
     54
     55/*
     56 * xhci_handshake - spin reading hc until handshake completes or fails
     57 * @ptr: address of hc register to be read
     58 * @mask: bits to look at in result of read
     59 * @done: value of those bits when handshake succeeds
      60 * @timeout_us: timeout in microseconds
     61 *
     62 * Returns negative errno, or zero on success
     63 *
     64 * Success happens when the "mask" bits have the specified value (hardware
      65 * handshake done).  There are two failure modes: the timeout has passed (major
     66 * hardware flakeout), or the register reads as all-ones (hardware removed).
     67 */
     68int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
     69{
     70	u32	result;
     71	int	ret;
     72
     73	ret = readl_poll_timeout_atomic(ptr, result,
     74					(result & mask) == done ||
     75					result == U32_MAX,
     76					1, timeout_us);
     77	if (result == U32_MAX)		/* card removed */
     78		return -ENODEV;
     79
     80	return ret;
     81}
     82
     83/*
     84 * Disable interrupts and begin the xHCI halting process.
     85 */
     86void xhci_quiesce(struct xhci_hcd *xhci)
     87{
     88	u32 halted;
     89	u32 cmd;
     90	u32 mask;
     91
     92	mask = ~(XHCI_IRQS);
     93	halted = readl(&xhci->op_regs->status) & STS_HALT;
     94	if (!halted)
     95		mask &= ~CMD_RUN;
     96
     97	cmd = readl(&xhci->op_regs->command);
     98	cmd &= mask;
     99	writel(cmd, &xhci->op_regs->command);
    100}
    101
    102/*
    103 * Force HC into halt state.
    104 *
    105 * Disable any IRQs and clear the run/stop bit.
    106 * HC will complete any current and actively pipelined transactions, and
    107 * should halt within 16 ms of the run/stop bit being cleared.
    108 * Read HC Halted bit in the status register to see when the HC is finished.
    109 */
    110int xhci_halt(struct xhci_hcd *xhci)
    111{
    112	int ret;
    113
    114	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
    115	xhci_quiesce(xhci);
    116
    117	ret = xhci_handshake(&xhci->op_regs->status,
    118			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
    119	if (ret) {
    120		xhci_warn(xhci, "Host halt failed, %d\n", ret);
    121		return ret;
    122	}
    123
    124	xhci->xhc_state |= XHCI_STATE_HALTED;
    125	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
    126
    127	return ret;
    128}
    129
    130/*
    131 * Set the run bit and wait for the host to be running.
    132 */
    133int xhci_start(struct xhci_hcd *xhci)
    134{
    135	u32 temp;
    136	int ret;
    137
    138	temp = readl(&xhci->op_regs->command);
    139	temp |= (CMD_RUN);
    140	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
    141			temp);
    142	writel(temp, &xhci->op_regs->command);
    143
    144	/*
    145	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
    146	 * running.
    147	 */
    148	ret = xhci_handshake(&xhci->op_regs->status,
    149			STS_HALT, 0, XHCI_MAX_HALT_USEC);
    150	if (ret == -ETIMEDOUT)
    151		xhci_err(xhci, "Host took too long to start, "
    152				"waited %u microseconds.\n",
    153				XHCI_MAX_HALT_USEC);
    154	if (!ret)
    155		/* clear state flags. Including dying, halted or removing */
    156		xhci->xhc_state = 0;
    157
    158	return ret;
    159}
    160
    161/*
    162 * Reset a halted HC.
    163 *
    164 * This resets pipelines, timers, counters, state machines, etc.
    165 * Transactions will be terminated immediately, and operational registers
    166 * will be set to their defaults.
    167 */
    168int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
    169{
    170	u32 command;
    171	u32 state;
    172	int ret;
    173
    174	state = readl(&xhci->op_regs->status);
    175
    176	if (state == ~(u32)0) {
    177		xhci_warn(xhci, "Host not accessible, reset failed.\n");
    178		return -ENODEV;
    179	}
    180
    181	if ((state & STS_HALT) == 0) {
    182		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
    183		return 0;
    184	}
    185
    186	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
    187	command = readl(&xhci->op_regs->command);
    188	command |= CMD_RESET;
    189	writel(command, &xhci->op_regs->command);
    190
     191	/* Existing Intel xHCI controllers require a delay of 1 ms
     192	 * after setting the CMD_RESET bit and before accessing any
     193	 * HC registers. This allows the HC to complete the
     194	 * reset operation and be ready for HC register access.
     195	 * Without this delay, the subsequent HC register access
     196	 * may very rarely result in a system hang.
    197	 */
    198	if (xhci->quirks & XHCI_INTEL_HOST)
    199		udelay(1000);
    200
    201	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
    202	if (ret)
    203		return ret;
    204
    205	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
    206		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
    207
    208	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    209			 "Wait for controller to be ready for doorbell rings");
    210	/*
    211	 * xHCI cannot write to any doorbells or operational registers other
    212	 * than status until the "Controller Not Ready" flag is cleared.
    213	 */
    214	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
    215
    216	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
    217	xhci->usb2_rhub.bus_state.suspended_ports = 0;
    218	xhci->usb2_rhub.bus_state.resuming_ports = 0;
    219	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
    220	xhci->usb3_rhub.bus_state.suspended_ports = 0;
    221	xhci->usb3_rhub.bus_state.resuming_ports = 0;
    222
    223	return ret;
    224}
    225
    226static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
    227{
    228	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
    229	int err, i;
    230	u64 val;
    231	u32 intrs;
    232
    233	/*
    234	 * Some Renesas controllers get into a weird state if they are
    235	 * reset while programmed with 64bit addresses (they will preserve
    236	 * the top half of the address in internal, non visible
    237	 * registers). You end up with half the address coming from the
    238	 * kernel, and the other half coming from the firmware. Also,
    239	 * changing the programming leads to extra accesses even if the
    240	 * controller is supposed to be halted. The controller ends up with
    241	 * a fatal fault, and is then ripe for being properly reset.
    242	 *
    243	 * Special care is taken to only apply this if the device is behind
    244	 * an iommu. Doing anything when there is no iommu is definitely
    245	 * unsafe...
    246	 */
    247	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
    248		return;
    249
    250	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
    251
    252	/* Clear HSEIE so that faults do not get signaled */
    253	val = readl(&xhci->op_regs->command);
    254	val &= ~CMD_HSEIE;
    255	writel(val, &xhci->op_regs->command);
    256
    257	/* Clear HSE (aka FATAL) */
    258	val = readl(&xhci->op_regs->status);
    259	val |= STS_FATAL;
    260	writel(val, &xhci->op_regs->status);
    261
    262	/* Now zero the registers, and brace for impact */
    263	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
    264	if (upper_32_bits(val))
    265		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
    266	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
    267	if (upper_32_bits(val))
    268		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
    269
    270	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
    271		      ARRAY_SIZE(xhci->run_regs->ir_set));
    272
    273	for (i = 0; i < intrs; i++) {
    274		struct xhci_intr_reg __iomem *ir;
    275
    276		ir = &xhci->run_regs->ir_set[i];
    277		val = xhci_read_64(xhci, &ir->erst_base);
    278		if (upper_32_bits(val))
    279			xhci_write_64(xhci, 0, &ir->erst_base);
     280		val = xhci_read_64(xhci, &ir->erst_dequeue);
    281		if (upper_32_bits(val))
    282			xhci_write_64(xhci, 0, &ir->erst_dequeue);
    283	}
    284
    285	/* Wait for the fault to appear. It will be cleared on reset */
    286	err = xhci_handshake(&xhci->op_regs->status,
    287			     STS_FATAL, STS_FATAL,
    288			     XHCI_MAX_HALT_USEC);
    289	if (!err)
    290		xhci_info(xhci, "Fault detected\n");
    291}
    292
    293#ifdef CONFIG_USB_PCI
    294/*
    295 * Set up MSI
    296 */
    297static int xhci_setup_msi(struct xhci_hcd *xhci)
    298{
    299	int ret;
    300	/*
     301	 * TODO: Check with MSI SoC for sysdev
    302	 */
    303	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
    304
    305	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
    306	if (ret < 0) {
    307		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    308				"failed to allocate MSI entry");
    309		return ret;
    310	}
    311
    312	ret = request_irq(pdev->irq, xhci_msi_irq,
    313				0, "xhci_hcd", xhci_to_hcd(xhci));
    314	if (ret) {
    315		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    316				"disable MSI interrupt");
    317		pci_free_irq_vectors(pdev);
    318	}
    319
    320	return ret;
    321}
    322
    323/*
    324 * Set up MSI-X
    325 */
    326static int xhci_setup_msix(struct xhci_hcd *xhci)
    327{
    328	int i, ret;
    329	struct usb_hcd *hcd = xhci_to_hcd(xhci);
    330	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
    331
    332	/*
     333	 * Calculate the number of MSI-X vectors supported.
     334	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
     335	 *   i.e. the max number of interrupters from the xhci HCSPARAMS1.
     336	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
     337	 *   extra vector so an interrupt is always available.
    338	 */
    339	xhci->msix_count = min(num_online_cpus() + 1,
    340				HCS_MAX_INTRS(xhci->hcs_params1));
    341
    342	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
    343			PCI_IRQ_MSIX);
    344	if (ret < 0) {
    345		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    346				"Failed to enable MSI-X");
    347		return ret;
    348	}
    349
    350	for (i = 0; i < xhci->msix_count; i++) {
    351		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
    352				"xhci_hcd", xhci_to_hcd(xhci));
    353		if (ret)
    354			goto disable_msix;
    355	}
    356
    357	hcd->msix_enabled = 1;
    358	return ret;
    359
    360disable_msix:
    361	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
    362	while (--i >= 0)
    363		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
    364	pci_free_irq_vectors(pdev);
    365	return ret;
    366}
    367
    368/* Free any IRQs and disable MSI-X */
    369static void xhci_cleanup_msix(struct xhci_hcd *xhci)
    370{
    371	struct usb_hcd *hcd = xhci_to_hcd(xhci);
    372	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
    373
    374	if (xhci->quirks & XHCI_PLAT)
    375		return;
    376
    377	/* return if using legacy interrupt */
    378	if (hcd->irq > 0)
    379		return;
    380
    381	if (hcd->msix_enabled) {
    382		int i;
    383
    384		for (i = 0; i < xhci->msix_count; i++)
    385			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
    386	} else {
    387		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
    388	}
    389
    390	pci_free_irq_vectors(pdev);
    391	hcd->msix_enabled = 0;
    392}
    393
    394static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
    395{
    396	struct usb_hcd *hcd = xhci_to_hcd(xhci);
    397
    398	if (hcd->msix_enabled) {
    399		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
    400		int i;
    401
    402		for (i = 0; i < xhci->msix_count; i++)
    403			synchronize_irq(pci_irq_vector(pdev, i));
    404	}
    405}
    406
    407static int xhci_try_enable_msi(struct usb_hcd *hcd)
    408{
    409	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    410	struct pci_dev  *pdev;
    411	int ret;
    412
    413	/* The xhci platform device has set up IRQs through usb_add_hcd. */
    414	if (xhci->quirks & XHCI_PLAT)
    415		return 0;
    416
    417	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
    418	/*
    419	 * Some Fresco Logic host controllers advertise MSI, but fail to
    420	 * generate interrupts.  Don't even try to enable MSI.
    421	 */
    422	if (xhci->quirks & XHCI_BROKEN_MSI)
    423		goto legacy_irq;
    424
    425	/* unregister the legacy interrupt */
    426	if (hcd->irq)
    427		free_irq(hcd->irq, hcd);
    428	hcd->irq = 0;
    429
    430	ret = xhci_setup_msix(xhci);
    431	if (ret)
     433		/* fall back to MSI */
    433		ret = xhci_setup_msi(xhci);
    434
    435	if (!ret) {
    436		hcd->msi_enabled = 1;
    437		return 0;
    438	}
    439
    440	if (!pdev->irq) {
    441		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
    442		return -EINVAL;
    443	}
    444
    445 legacy_irq:
    446	if (!strlen(hcd->irq_descr))
    447		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
    448			 hcd->driver->description, hcd->self.busnum);
    449
     450	/* fall back to legacy interrupt */
    451	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
    452			hcd->irq_descr, hcd);
    453	if (ret) {
    454		xhci_err(xhci, "request interrupt %d failed\n",
    455				pdev->irq);
    456		return ret;
    457	}
    458	hcd->irq = pdev->irq;
    459	return 0;
    460}
    461
    462#else
    463
    464static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
    465{
    466	return 0;
    467}
    468
    469static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
    470{
    471}
    472
    473static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
    474{
    475}
    476
    477#endif
    478
    479static void compliance_mode_recovery(struct timer_list *t)
    480{
    481	struct xhci_hcd *xhci;
    482	struct usb_hcd *hcd;
    483	struct xhci_hub *rhub;
    484	u32 temp;
    485	int i;
    486
    487	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
    488	rhub = &xhci->usb3_rhub;
    489	hcd = rhub->hcd;
    490
    491	if (!hcd)
    492		return;
    493
    494	for (i = 0; i < rhub->num_ports; i++) {
    495		temp = readl(rhub->ports[i]->addr);
    496		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
    497			/*
    498			 * Compliance Mode Detected. Letting USB Core
    499			 * handle the Warm Reset
    500			 */
    501			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
    502					"Compliance mode detected->port %d",
    503					i + 1);
    504			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
    505					"Attempting compliance mode recovery");
    506
    507			if (hcd->state == HC_STATE_SUSPENDED)
    508				usb_hcd_resume_root_hub(hcd);
    509
    510			usb_hcd_poll_rh_status(hcd);
    511		}
    512	}
    513
    514	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
    515		mod_timer(&xhci->comp_mode_recovery_timer,
    516			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
    517}
    518
    519/*
     520 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
     521 * which sometimes makes ports behind that hardware enter compliance mode.
     522 * The quirk creates a timer that polls the link state of each host
     523 * controller's port every 2 seconds and recovers the port by issuing a Warm
     524 * reset if Compliance mode is detected; otherwise the port becomes "dead" (no
     525 * device connections or disconnections will be detected anymore). Because no
     526 * status event is generated when entering compliance mode (per xhci spec),
     527 * this quirk is needed on systems that have the failing hardware installed.
    528 */
    529static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
    530{
    531	xhci->port_status_u0 = 0;
    532	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
    533		    0);
    534	xhci->comp_mode_recovery_timer.expires = jiffies +
    535			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
    536
    537	add_timer(&xhci->comp_mode_recovery_timer);
    538	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
    539			"Compliance mode recovery timer initialized");
    540}
    541
    542/*
    543 * This function identifies the systems that have installed the SN65LVPE502CP
    544 * USB3.0 re-driver and that need the Compliance Mode Quirk.
    545 * Systems:
    546 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
    547 */
    548static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
    549{
    550	const char *dmi_product_name, *dmi_sys_vendor;
    551
    552	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
    553	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
    554	if (!dmi_product_name || !dmi_sys_vendor)
    555		return false;
    556
    557	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
    558		return false;
    559
    560	if (strstr(dmi_product_name, "Z420") ||
    561			strstr(dmi_product_name, "Z620") ||
    562			strstr(dmi_product_name, "Z820") ||
    563			strstr(dmi_product_name, "Z1 Workstation"))
    564		return true;
    565
    566	return false;
    567}
    568
    569static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
    570{
    571	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
    572}
    573
    574
    575/*
    576 * Initialize memory for HCD and xHC (one-time init).
    577 *
    578 * Program the PAGESIZE register, initialize the device context array, create
    579 * device contexts (?), set up a command ring segment (or two?), create event
    580 * ring (one for now).
    581 */
    582static int xhci_init(struct usb_hcd *hcd)
    583{
    584	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    585	int retval;
    586
    587	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
    588	spin_lock_init(&xhci->lock);
    589	if (xhci->hci_version == 0x95 && link_quirk) {
    590		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
    591				"QUIRK: Not clearing Link TRB chain bits.");
    592		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
    593	} else {
    594		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    595				"xHCI doesn't need link TRB QUIRK");
    596	}
    597	retval = xhci_mem_init(xhci, GFP_KERNEL);
    598	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
    599
    600	/* Initializing Compliance Mode Recovery Data If Needed */
    601	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
    602		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
    603		compliance_mode_recovery_timer_init(xhci);
    604	}
    605
    606	return retval;
    607}
    608
    609/*-------------------------------------------------------------------------*/
    610
    611
    612static int xhci_run_finished(struct xhci_hcd *xhci)
    613{
    614	unsigned long	flags;
    615	u32		temp;
    616
    617	/*
    618	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
     619	 * Protect the short window before the host is running with a lock.
    620	 */
    621	spin_lock_irqsave(&xhci->lock, flags);
    622
    623	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
    624	temp = readl(&xhci->op_regs->command);
    625	temp |= (CMD_EIE);
    626	writel(temp, &xhci->op_regs->command);
    627
    628	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
    629	temp = readl(&xhci->ir_set->irq_pending);
    630	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
    631
    632	if (xhci_start(xhci)) {
    633		xhci_halt(xhci);
    634		spin_unlock_irqrestore(&xhci->lock, flags);
    635		return -ENODEV;
    636	}
    637
    638	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
    639
    640	if (xhci->quirks & XHCI_NEC_HOST)
    641		xhci_ring_cmd_db(xhci);
    642
    643	spin_unlock_irqrestore(&xhci->lock, flags);
    644
    645	return 0;
    646}
    647
    648/*
    649 * Start the HC after it was halted.
    650 *
    651 * This function is called by the USB core when the HC driver is added.
    652 * Its opposite is xhci_stop().
    653 *
    654 * xhci_init() must be called once before this function can be called.
    655 * Reset the HC, enable device slot contexts, program DCBAAP, and
    656 * set command ring pointer and event ring pointer.
    657 *
     658 * Set up MSI-X vectors and enable interrupts.
    659 */
    660int xhci_run(struct usb_hcd *hcd)
    661{
    662	u32 temp;
    663	u64 temp_64;
    664	int ret;
    665	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    666
    667	/* Start the xHCI host controller running only after the USB 2.0 roothub
     668	 * is set up.
    669	 */
    670
    671	hcd->uses_new_polling = 1;
    672	if (!usb_hcd_is_primary_hcd(hcd))
    673		return xhci_run_finished(xhci);
    674
    675	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
    676
    677	ret = xhci_try_enable_msi(hcd);
    678	if (ret)
    679		return ret;
    680
    681	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
    682	temp_64 &= ~ERST_PTR_MASK;
    683	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    684			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
    685
    686	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    687			"// Set the interrupt modulation register");
    688	temp = readl(&xhci->ir_set->irq_control);
    689	temp &= ~ER_IRQ_INTERVAL_MASK;
    690	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
    691	writel(temp, &xhci->ir_set->irq_control);
    692
    693	if (xhci->quirks & XHCI_NEC_HOST) {
    694		struct xhci_command *command;
    695
    696		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
    697		if (!command)
    698			return -ENOMEM;
    699
    700		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
    701				TRB_TYPE(TRB_NEC_GET_FW));
    702		if (ret)
    703			xhci_free_command(xhci, command);
    704	}
    705	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    706			"Finished %s for main hcd", __func__);
    707
    708	xhci_create_dbc_dev(xhci);
    709
    710	xhci_debugfs_init(xhci);
    711
    712	if (xhci_has_one_roothub(xhci))
    713		return xhci_run_finished(xhci);
    714
    715	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
    716
    717	return 0;
    718}
    719EXPORT_SYMBOL_GPL(xhci_run);
    720
    721/*
    722 * Stop xHCI driver.
    723 *
    724 * This function is called by the USB core when the HC driver is removed.
    725 * Its opposite is xhci_run().
    726 *
    727 * Disable device contexts, disable IRQs, and quiesce the HC.
     728 * Reset the HC, finish any completed transactions, and clean up memory.
    729 */
    730static void xhci_stop(struct usb_hcd *hcd)
    731{
    732	u32 temp;
    733	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    734
    735	mutex_lock(&xhci->mutex);
    736
    737	/* Only halt host and free memory after both hcds are removed */
    738	if (!usb_hcd_is_primary_hcd(hcd)) {
    739		mutex_unlock(&xhci->mutex);
    740		return;
    741	}
    742
    743	xhci_remove_dbc_dev(xhci);
    744
    745	spin_lock_irq(&xhci->lock);
    746	xhci->xhc_state |= XHCI_STATE_HALTED;
    747	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
    748	xhci_halt(xhci);
    749	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
    750	spin_unlock_irq(&xhci->lock);
    751
    752	xhci_cleanup_msix(xhci);
    753
    754	/* Deleting Compliance Mode Recovery Timer */
    755	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
    756			(!(xhci_all_ports_seen_u0(xhci)))) {
    757		del_timer_sync(&xhci->comp_mode_recovery_timer);
    758		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
    759				"%s: compliance mode recovery timer deleted",
    760				__func__);
    761	}
    762
    763	if (xhci->quirks & XHCI_AMD_PLL_FIX)
    764		usb_amd_dev_put();
    765
    766	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    767			"// Disabling event ring interrupts");
    768	temp = readl(&xhci->op_regs->status);
    769	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
    770	temp = readl(&xhci->ir_set->irq_pending);
    771	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
    772
    773	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
    774	xhci_mem_cleanup(xhci);
    775	xhci_debugfs_exit(xhci);
    776	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    777			"xhci_stop completed - status = %x",
    778			readl(&xhci->op_regs->status));
    779	mutex_unlock(&xhci->mutex);
    780}
    781
    782/*
    783 * Shutdown HC (not bus-specific)
    784 *
    785 * This is called when the machine is rebooting or halting.  We assume that the
    786 * machine will be powered off, and the HC's internal state will be reset.
    787 * Don't bother to free memory.
    788 *
    789 * This will only ever be called with the main usb_hcd (the USB3 roothub).
    790 */
    791void xhci_shutdown(struct usb_hcd *hcd)
    792{
    793	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    794	unsigned long flags;
    795	int i;
    796
    797	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
    798		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
    799
    800	/* Don't poll the roothubs after shutdown. */
    801	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
    802			__func__, hcd->self.busnum);
    803	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
    804	del_timer_sync(&hcd->rh_timer);
    805
    806	if (xhci->shared_hcd) {
    807		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
    808		del_timer_sync(&xhci->shared_hcd->rh_timer);
    809	}
    810
    811	spin_lock_irqsave(&xhci->lock, flags);
    812	xhci_halt(xhci);
    813
     814	/* Power off USB2 ports */
    815	for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
    816		xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
    817
     818	/* Power off USB3 ports */
    819	for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
    820		xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
    821
    822	/* Workaround for spurious wakeups at shutdown with HSW */
    823	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
    824		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
    825	spin_unlock_irqrestore(&xhci->lock, flags);
    826
    827	xhci_cleanup_msix(xhci);
    828
    829	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    830			"xhci_shutdown completed - status = %x",
    831			readl(&xhci->op_regs->status));
    832}
    833EXPORT_SYMBOL_GPL(xhci_shutdown);
    834
    835#ifdef CONFIG_PM
    836static void xhci_save_registers(struct xhci_hcd *xhci)
    837{
    838	xhci->s3.command = readl(&xhci->op_regs->command);
    839	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
    840	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
    841	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
    842	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
    843	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
    844	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
    845	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
    846	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
    847}
    848
    849static void xhci_restore_registers(struct xhci_hcd *xhci)
    850{
    851	writel(xhci->s3.command, &xhci->op_regs->command);
    852	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
    853	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
    854	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
    855	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
    856	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
    857	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
    858	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
    859	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
    860}
    861
    862static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
    863{
    864	u64	val_64;
    865
    866	/* step 2: initialize command ring buffer */
    867	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
    868	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
    869		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
    870				      xhci->cmd_ring->dequeue) &
    871		 (u64) ~CMD_RING_RSVD_BITS) |
    872		xhci->cmd_ring->cycle_state;
    873	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
    874			"// Setting command ring address to 0x%llx",
    875			(long unsigned long) val_64);
    876	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
    877}
    878
    879/*
    880 * The whole command ring must be cleared to zero when we suspend the host.
    881 *
    882 * The host doesn't save the command ring pointer in the suspend well, so we
    883 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
    884 * aligned, because of the reserved bits in the command ring dequeue pointer
    885 * register.  Therefore, we can't just set the dequeue pointer back in the
    886 * middle of the ring (TRBs are 16-byte aligned).
    887 */
    888static void xhci_clear_command_ring(struct xhci_hcd *xhci)
    889{
    890	struct xhci_ring *ring;
    891	struct xhci_segment *seg;
    892
    893	ring = xhci->cmd_ring;
    894	seg = ring->deq_seg;
    895	do {
    896		memset(seg->trbs, 0,
    897			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
    898		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
    899			cpu_to_le32(~TRB_CYCLE);
    900		seg = seg->next;
    901	} while (seg != ring->deq_seg);
    902
    903	/* Reset the software enqueue and dequeue pointers */
    904	ring->deq_seg = ring->first_seg;
    905	ring->dequeue = ring->first_seg->trbs;
    906	ring->enq_seg = ring->deq_seg;
    907	ring->enqueue = ring->dequeue;
    908
    909	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
    910	/*
    911	 * Ring is now zeroed, so the HW should look for change of ownership
    912	 * when the cycle bit is set to 1.
    913	 */
    914	ring->cycle_state = 1;
    915
    916	/*
    917	 * Reset the hardware dequeue pointer.
    918	 * Yes, this will need to be re-written after resume, but we're paranoid
    919	 * and want to make sure the hardware doesn't access bogus memory
    920	 * because, say, the BIOS or an SMI started the host without changing
    921	 * the command ring pointers.
    922	 */
    923	xhci_set_cmd_ring_deq(xhci);
    924}
    925
    926/*
    927 * Disable port wake bits if do_wakeup is not set.
    928 *
    929 * Also clear a possible internal port wake state left hanging for ports that
     930 * detected termination but never successfully enumerated (trained to U0).
     931 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
     932 * done at enumeration clears this wake; force one here as well for unconnected ports.
    933 */
    934
    935static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
    936				       struct xhci_hub *rhub,
    937				       bool do_wakeup)
    938{
    939	unsigned long flags;
    940	u32 t1, t2, portsc;
    941	int i;
    942
    943	spin_lock_irqsave(&xhci->lock, flags);
    944
    945	for (i = 0; i < rhub->num_ports; i++) {
    946		portsc = readl(rhub->ports[i]->addr);
    947		t1 = xhci_port_state_to_neutral(portsc);
    948		t2 = t1;
    949
     950		/* clear wake bits if do_wakeup is not set */
    951		if (!do_wakeup)
    952			t2 &= ~PORT_WAKE_BITS;
    953
    954		/* Don't touch csc bit if connected or connect change is set */
    955		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
    956			t2 |= PORT_CSC;
    957
    958		if (t1 != t2) {
    959			writel(t2, rhub->ports[i]->addr);
    960			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
    961				 rhub->hcd->self.busnum, i + 1, portsc, t2);
    962		}
    963	}
    964	spin_unlock_irqrestore(&xhci->lock, flags);
    965}
    966
    967static bool xhci_pending_portevent(struct xhci_hcd *xhci)
    968{
    969	struct xhci_port	**ports;
    970	int			port_index;
    971	u32			status;
    972	u32			portsc;
    973
    974	status = readl(&xhci->op_regs->status);
    975	if (status & STS_EINT)
    976		return true;
    977	/*
    978	 * Checking STS_EINT is not enough as there is a lag between a change
    979	 * bit being set and the Port Status Change Event that it generated
    980	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
    981	 */
    982
    983	port_index = xhci->usb2_rhub.num_ports;
    984	ports = xhci->usb2_rhub.ports;
    985	while (port_index--) {
    986		portsc = readl(ports[port_index]->addr);
    987		if (portsc & PORT_CHANGE_MASK ||
    988		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
    989			return true;
    990	}
    991	port_index = xhci->usb3_rhub.num_ports;
    992	ports = xhci->usb3_rhub.ports;
    993	while (port_index--) {
    994		portsc = readl(ports[port_index]->addr);
    995		if (portsc & PORT_CHANGE_MASK ||
    996		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
    997			return true;
    998	}
    999	return false;
   1000}
   1001
   1002/*
   1003 * Stop HC (not bus-specific)
   1004 *
    1005 * This is called when the machine transitions into S3/S4 mode.
   1006 *
   1007 */
   1008int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
   1009{
   1010	int			rc = 0;
   1011	unsigned int		delay = XHCI_MAX_HALT_USEC * 2;
   1012	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
   1013	u32			command;
   1014	u32			res;
   1015
   1016	if (!hcd->state)
   1017		return 0;
   1018
   1019	if (hcd->state != HC_STATE_SUSPENDED ||
   1020	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
   1021		return -EINVAL;
   1022
   1023	/* Clear root port wake on bits if wakeup not allowed. */
   1024	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
   1025	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
   1026
   1027	if (!HCD_HW_ACCESSIBLE(hcd))
   1028		return 0;
   1029
   1030	xhci_dbc_suspend(xhci);
   1031
   1032	/* Don't poll the roothubs on bus suspend. */
   1033	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
   1034		 __func__, hcd->self.busnum);
   1035	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
   1036	del_timer_sync(&hcd->rh_timer);
   1037	if (xhci->shared_hcd) {
   1038		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
   1039		del_timer_sync(&xhci->shared_hcd->rh_timer);
   1040	}
   1041
   1042	if (xhci->quirks & XHCI_SUSPEND_DELAY)
   1043		usleep_range(1000, 1500);
   1044
   1045	spin_lock_irq(&xhci->lock);
   1046	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
   1047	if (xhci->shared_hcd)
   1048		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
   1049	/* step 1: stop endpoint */
    1050	/* skipped, assuming that port suspend has already been done */
   1051
   1052	/* step 2: clear Run/Stop bit */
   1053	command = readl(&xhci->op_regs->command);
   1054	command &= ~CMD_RUN;
   1055	writel(command, &xhci->op_regs->command);
   1056
   1057	/* Some chips from Fresco Logic need an extraordinary delay */
   1058	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
   1059
   1060	if (xhci_handshake(&xhci->op_regs->status,
   1061		      STS_HALT, STS_HALT, delay)) {
   1062		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
   1063		spin_unlock_irq(&xhci->lock);
   1064		return -ETIMEDOUT;
   1065	}
   1066	xhci_clear_command_ring(xhci);
   1067
   1068	/* step 3: save registers */
   1069	xhci_save_registers(xhci);
   1070
   1071	/* step 4: set CSS flag */
   1072	command = readl(&xhci->op_regs->command);
   1073	command |= CMD_CSS;
   1074	writel(command, &xhci->op_regs->command);
   1075	xhci->broken_suspend = 0;
   1076	if (xhci_handshake(&xhci->op_regs->status,
   1077				STS_SAVE, 0, 20 * 1000)) {
   1078	/*
    1079	 * AMD SNPS xHC 3.0 occasionally does not clear the SSS bit of
    1080	 * USBSTS. The driver then polls to see if the xHC clears BIT(8),
    1081	 * which never happens; it then assumes that the controller is not
    1082	 * responding and times out.
    1083	 * To work around this, it is enough to check that the SRE and HCE
    1084	 * bits are not set (as per xhci Section 5.4.2) and bypass the
    1085	 * timeout.
   1086	 */
   1087		res = readl(&xhci->op_regs->status);
   1088		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
   1089		    (((res & STS_SRE) == 0) &&
   1090				((res & STS_HCE) == 0))) {
   1091			xhci->broken_suspend = 1;
   1092		} else {
   1093			xhci_warn(xhci, "WARN: xHC save state timeout\n");
   1094			spin_unlock_irq(&xhci->lock);
   1095			return -ETIMEDOUT;
   1096		}
   1097	}
   1098	spin_unlock_irq(&xhci->lock);
   1099
   1100	/*
   1101	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
   1102	 * is about to be suspended.
   1103	 */
   1104	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
   1105			(!(xhci_all_ports_seen_u0(xhci)))) {
   1106		del_timer_sync(&xhci->comp_mode_recovery_timer);
   1107		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   1108				"%s: compliance mode recovery timer deleted",
   1109				__func__);
   1110	}
   1111
   1112	/* step 5: remove core well power */
   1113	/* synchronize irq when using MSI-X */
   1114	xhci_msix_sync_irqs(xhci);
   1115
   1116	return rc;
   1117}
   1118EXPORT_SYMBOL_GPL(xhci_suspend);
   1119
   1120/*
   1121 * start xHC (not bus-specific)
   1122 *
    1123 * This is called when the machine transitions out of S3/S4 mode.
   1124 *
   1125 */
   1126int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
   1127{
   1128	u32			command, temp = 0;
   1129	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
   1130	int			retval = 0;
   1131	bool			comp_timer_running = false;
   1132	bool			pending_portevent = false;
   1133	bool			reinit_xhc = false;
   1134
   1135	if (!hcd->state)
   1136		return 0;
   1137
    1138	/* Wait a bit if either of the roothubs needs to settle from the
   1139	 * transition into bus suspend.
   1140	 */
   1141
   1142	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
   1143	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
   1144		msleep(100);
   1145
   1146	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
   1147	if (xhci->shared_hcd)
   1148		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
   1149
   1150	spin_lock_irq(&xhci->lock);
   1151
   1152	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
   1153		reinit_xhc = true;
   1154
   1155	if (!reinit_xhc) {
   1156		/*
   1157		 * Some controllers might lose power during suspend, so wait
   1158		 * for controller not ready bit to clear, just as in xHC init.
   1159		 */
   1160		retval = xhci_handshake(&xhci->op_regs->status,
   1161					STS_CNR, 0, 10 * 1000 * 1000);
   1162		if (retval) {
   1163			xhci_warn(xhci, "Controller not ready at resume %d\n",
   1164				  retval);
   1165			spin_unlock_irq(&xhci->lock);
   1166			return retval;
   1167		}
   1168		/* step 1: restore register */
   1169		xhci_restore_registers(xhci);
   1170		/* step 2: initialize command ring buffer */
   1171		xhci_set_cmd_ring_deq(xhci);
    1172		/* step 3: restore state and start state */
   1173		/* step 3: set CRS flag */
   1174		command = readl(&xhci->op_regs->command);
   1175		command |= CMD_CRS;
   1176		writel(command, &xhci->op_regs->command);
   1177		/*
    1178		 * Some controllers take up to 55+ ms to complete the controller
    1179		 * restore, so set the timeout to 100 ms. The xHCI specification
    1180		 * doesn't mention any timeout value.
   1181		 */
   1182		if (xhci_handshake(&xhci->op_regs->status,
   1183			      STS_RESTORE, 0, 100 * 1000)) {
   1184			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
   1185			spin_unlock_irq(&xhci->lock);
   1186			return -ETIMEDOUT;
   1187		}
   1188	}
   1189
   1190	temp = readl(&xhci->op_regs->status);
   1191
   1192	/* re-initialize the HC on Restore Error, or Host Controller Error */
   1193	if (temp & (STS_SRE | STS_HCE)) {
   1194		reinit_xhc = true;
   1195		xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
   1196	}
   1197
   1198	if (reinit_xhc) {
   1199		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
   1200				!(xhci_all_ports_seen_u0(xhci))) {
   1201			del_timer_sync(&xhci->comp_mode_recovery_timer);
   1202			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   1203				"Compliance Mode Recovery Timer deleted!");
   1204		}
   1205
   1206		/* Let the USB core know _both_ roothubs lost power. */
   1207		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
   1208		if (xhci->shared_hcd)
   1209			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
   1210
   1211		xhci_dbg(xhci, "Stop HCD\n");
   1212		xhci_halt(xhci);
   1213		xhci_zero_64b_regs(xhci);
   1214		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
   1215		spin_unlock_irq(&xhci->lock);
   1216		if (retval)
   1217			return retval;
   1218		xhci_cleanup_msix(xhci);
   1219
   1220		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
   1221		temp = readl(&xhci->op_regs->status);
   1222		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
   1223		temp = readl(&xhci->ir_set->irq_pending);
   1224		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
   1225
   1226		xhci_dbg(xhci, "cleaning up memory\n");
   1227		xhci_mem_cleanup(xhci);
   1228		xhci_debugfs_exit(xhci);
   1229		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
   1230			    readl(&xhci->op_regs->status));
   1231
   1232		/* USB core calls the PCI reinit and start functions twice:
   1233		 * first with the primary HCD, and then with the secondary HCD.
   1234		 * If we don't do the same, the host will never be started.
   1235		 */
   1236		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
   1237		retval = xhci_init(hcd);
   1238		if (retval)
   1239			return retval;
   1240		comp_timer_running = true;
   1241
   1242		xhci_dbg(xhci, "Start the primary HCD\n");
   1243		retval = xhci_run(hcd);
   1244		if (!retval && xhci->shared_hcd) {
   1245			xhci_dbg(xhci, "Start the secondary HCD\n");
   1246			retval = xhci_run(xhci->shared_hcd);
   1247		}
   1248
   1249		hcd->state = HC_STATE_SUSPENDED;
   1250		if (xhci->shared_hcd)
   1251			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
   1252		goto done;
   1253	}
   1254
   1255	/* step 4: set Run/Stop bit */
   1256	command = readl(&xhci->op_regs->command);
   1257	command |= CMD_RUN;
   1258	writel(command, &xhci->op_regs->command);
   1259	xhci_handshake(&xhci->op_regs->status, STS_HALT,
   1260		  0, 250 * 1000);
   1261
   1262	/* step 5: walk topology and initialize portsc,
   1263	 * portpmsc and portli
   1264	 */
   1265	/* this is done in bus_resume */
   1266
   1267	/* step 6: restart each of the previously
   1268	 * Running endpoints by ringing their doorbells
   1269	 */
   1270
   1271	spin_unlock_irq(&xhci->lock);
   1272
   1273	xhci_dbc_resume(xhci);
   1274
   1275 done:
   1276	if (retval == 0) {
   1277		/*
   1278		 * Resume roothubs only if there are pending events.
   1279		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
    1280		 * the first wake signalling failed; give it that chance.
   1281		 */
   1282		pending_portevent = xhci_pending_portevent(xhci);
   1283		if (!pending_portevent) {
   1284			msleep(120);
   1285			pending_portevent = xhci_pending_portevent(xhci);
   1286		}
   1287
   1288		if (pending_portevent) {
   1289			if (xhci->shared_hcd)
   1290				usb_hcd_resume_root_hub(xhci->shared_hcd);
   1291			usb_hcd_resume_root_hub(hcd);
   1292		}
   1293	}
   1294	/*
    1295	 * If the system is subject to the quirk, the Compliance Mode Timer always
    1296	 * needs to be re-initialized after a system resume, since the ports may
    1297	 * suffer the Compliance Mode issue again. It doesn't matter whether the
    1298	 * ports had previously entered U0 before the system was suspended.
   1299	 */
   1300	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
   1301		compliance_mode_recovery_timer_init(xhci);
   1302
   1303	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
   1304		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
   1305
   1306	/* Re-enable port polling. */
   1307	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
   1308		 __func__, hcd->self.busnum);
   1309	if (xhci->shared_hcd) {
   1310		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
   1311		usb_hcd_poll_rh_status(xhci->shared_hcd);
   1312	}
   1313	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
   1314	usb_hcd_poll_rh_status(hcd);
   1315
   1316	return retval;
   1317}
   1318EXPORT_SYMBOL_GPL(xhci_resume);
   1319#endif	/* CONFIG_PM */
   1320
   1321/*-------------------------------------------------------------------------*/
   1322
   1323static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
   1324{
   1325	void *temp;
   1326	int ret = 0;
   1327	unsigned int buf_len;
   1328	enum dma_data_direction dir;
   1329
   1330	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
   1331	buf_len = urb->transfer_buffer_length;
   1332
   1333	temp = kzalloc_node(buf_len, GFP_ATOMIC,
   1334			    dev_to_node(hcd->self.sysdev));
   1335
   1336	if (usb_urb_dir_out(urb))
   1337		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
   1338				   temp, buf_len, 0);
   1339
   1340	urb->transfer_buffer = temp;
   1341	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
   1342					   urb->transfer_buffer,
   1343					   urb->transfer_buffer_length,
   1344					   dir);
   1345
   1346	if (dma_mapping_error(hcd->self.sysdev,
   1347			      urb->transfer_dma)) {
   1348		ret = -EAGAIN;
   1349		kfree(temp);
   1350	} else {
   1351		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
   1352	}
   1353
   1354	return ret;
   1355}
   1356
   1357static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
   1358					  struct urb *urb)
   1359{
   1360	bool ret = false;
   1361	unsigned int i;
   1362	unsigned int len = 0;
   1363	unsigned int trb_size;
   1364	unsigned int max_pkt;
   1365	struct scatterlist *sg;
   1366	struct scatterlist *tail_sg;
   1367
   1368	tail_sg = urb->sg;
   1369	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
   1370
   1371	if (!urb->num_sgs)
   1372		return ret;
   1373
   1374	if (urb->dev->speed >= USB_SPEED_SUPER)
   1375		trb_size = TRB_CACHE_SIZE_SS;
   1376	else
   1377		trb_size = TRB_CACHE_SIZE_HS;
   1378
   1379	if (urb->transfer_buffer_length != 0 &&
   1380	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
   1381		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
   1382			len = len + sg->length;
   1383			if (i > trb_size - 2) {
   1384				len = len - tail_sg->length;
   1385				if (len < max_pkt) {
   1386					ret = true;
   1387					break;
   1388				}
   1389
   1390				tail_sg = sg_next(tail_sg);
   1391			}
   1392		}
   1393	}
   1394	return ret;
   1395}
   1396
   1397static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
   1398{
   1399	unsigned int len;
   1400	unsigned int buf_len;
   1401	enum dma_data_direction dir;
   1402
   1403	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
   1404
   1405	buf_len = urb->transfer_buffer_length;
   1406
   1407	if (IS_ENABLED(CONFIG_HAS_DMA) &&
   1408	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
   1409		dma_unmap_single(hcd->self.sysdev,
   1410				 urb->transfer_dma,
   1411				 urb->transfer_buffer_length,
   1412				 dir);
   1413
   1414	if (usb_urb_dir_in(urb)) {
   1415		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
   1416					   urb->transfer_buffer,
   1417					   buf_len,
   1418					   0);
   1419		if (len != buf_len) {
   1420			xhci_dbg(hcd_to_xhci(hcd),
   1421				 "Copy from tmp buf to urb sg list failed\n");
   1422			urb->actual_length = len;
   1423		}
   1424	}
   1425	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
   1426	kfree(urb->transfer_buffer);
   1427	urb->transfer_buffer = NULL;
   1428}
   1429
   1430/*
    1431 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
    1432 * we'll copy the actual data into the TRB address register. This is limited to
    1433 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
    1434 * >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed.
   1435 */
   1436static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
   1437				gfp_t mem_flags)
   1438{
   1439	struct xhci_hcd *xhci;
   1440
   1441	xhci = hcd_to_xhci(hcd);
   1442
   1443	if (xhci_urb_suitable_for_idt(urb))
   1444		return 0;
   1445
   1446	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
   1447		if (xhci_urb_temp_buffer_required(hcd, urb))
   1448			return xhci_map_temp_buffer(hcd, urb);
   1449	}
   1450	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
   1451}
   1452
   1453static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
   1454{
   1455	struct xhci_hcd *xhci;
   1456	bool unmap_temp_buf = false;
   1457
   1458	xhci = hcd_to_xhci(hcd);
   1459
   1460	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
   1461		unmap_temp_buf = true;
   1462
   1463	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
   1464		xhci_unmap_temp_buf(hcd, urb);
   1465	else
   1466		usb_hcd_unmap_urb_for_dma(hcd, urb);
   1467}
   1468
   1469/**
   1470 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
   1471 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
   1472 * value to right shift 1 for the bitmask.
   1473 *
   1474 * Index  = (epnum * 2) + direction - 1,
   1475 * where direction = 0 for OUT, 1 for IN.
   1476 * For control endpoints, the IN index is used (OUT index is unused), so
   1477 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
   1478 */
   1479unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
   1480{
   1481	unsigned int index;
   1482	if (usb_endpoint_xfer_control(desc))
   1483		index = (unsigned int) (usb_endpoint_num(desc)*2);
   1484	else
   1485		index = (unsigned int) (usb_endpoint_num(desc)*2) +
   1486			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
   1487	return index;
   1488}
   1489EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
   1490
   1491/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
   1492 * address from the XHCI endpoint index.
   1493 */
   1494unsigned int xhci_get_endpoint_address(unsigned int ep_index)
   1495{
   1496	unsigned int number = DIV_ROUND_UP(ep_index, 2);
   1497	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
   1498	return direction | number;
   1499}
   1500
   1501/* Find the flag for this endpoint (for use in the control context).  Use the
   1502 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
   1503 * bit 1, etc.
   1504 */
   1505static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
   1506{
   1507	return 1 << (xhci_get_endpoint_index(desc) + 1);
   1508}
   1509
   1510/* Compute the last valid endpoint context index.  Basically, this is the
    1511 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
   1512 * we find the most significant bit set in the added contexts flags.
   1513 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
   1514 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
   1515 */
   1516unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
   1517{
   1518	return fls(added_ctxs) - 1;
   1519}
   1520
   1521/* Returns 1 if the arguments are OK;
    1522 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
   1523 */
   1524static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
   1525		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
   1526		const char *func) {
   1527	struct xhci_hcd	*xhci;
   1528	struct xhci_virt_device	*virt_dev;
   1529
   1530	if (!hcd || (check_ep && !ep) || !udev) {
   1531		pr_debug("xHCI %s called with invalid args\n", func);
   1532		return -EINVAL;
   1533	}
   1534	if (!udev->parent) {
   1535		pr_debug("xHCI %s called for root hub\n", func);
   1536		return 0;
   1537	}
   1538
   1539	xhci = hcd_to_xhci(hcd);
   1540	if (check_virt_dev) {
   1541		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
   1542			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
   1543					func);
   1544			return -EINVAL;
   1545		}
   1546
   1547		virt_dev = xhci->devs[udev->slot_id];
   1548		if (virt_dev->udev != udev) {
   1549			xhci_dbg(xhci, "xHCI %s called with udev and "
   1550					  "virt_dev does not match\n", func);
   1551			return -EINVAL;
   1552		}
   1553	}
   1554
   1555	if (xhci->xhc_state & XHCI_STATE_HALTED)
   1556		return -ENODEV;
   1557
   1558	return 1;
   1559}
   1560
   1561static int xhci_configure_endpoint(struct xhci_hcd *xhci,
   1562		struct usb_device *udev, struct xhci_command *command,
   1563		bool ctx_change, bool must_succeed);
   1564
   1565/*
   1566 * Full speed devices may have a max packet size greater than 8 bytes, but the
   1567 * USB core doesn't know that until it reads the first 8 bytes of the
   1568 * descriptor.  If the usb_device's max packet size changes after that point,
   1569 * we need to issue an evaluate context command and wait on it.
   1570 */
   1571static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
   1572		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
   1573{
   1574	struct xhci_container_ctx *out_ctx;
   1575	struct xhci_input_control_ctx *ctrl_ctx;
   1576	struct xhci_ep_ctx *ep_ctx;
   1577	struct xhci_command *command;
   1578	int max_packet_size;
   1579	int hw_max_packet_size;
   1580	int ret = 0;
   1581
   1582	out_ctx = xhci->devs[slot_id]->out_ctx;
   1583	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
   1584	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
   1585	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
   1586	if (hw_max_packet_size != max_packet_size) {
   1587		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
   1588				"Max Packet Size for ep 0 changed.");
   1589		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
   1590				"Max packet size in usb_device = %d",
   1591				max_packet_size);
   1592		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
   1593				"Max packet size in xHCI HW = %d",
   1594				hw_max_packet_size);
   1595		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
   1596				"Issuing evaluate context command.");
   1597
   1598		/* Set up the input context flags for the command */
   1599		/* FIXME: This won't work if a non-default control endpoint
   1600		 * changes max packet sizes.
   1601		 */
   1602
   1603		command = xhci_alloc_command(xhci, true, mem_flags);
   1604		if (!command)
   1605			return -ENOMEM;
   1606
   1607		command->in_ctx = xhci->devs[slot_id]->in_ctx;
   1608		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
   1609		if (!ctrl_ctx) {
   1610			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   1611					__func__);
   1612			ret = -ENOMEM;
   1613			goto command_cleanup;
   1614		}
   1615		/* Set up the modified control endpoint 0 */
   1616		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
   1617				xhci->devs[slot_id]->out_ctx, ep_index);
   1618
   1619		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
   1620		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
   1621		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
   1622		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
   1623
   1624		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
   1625		ctrl_ctx->drop_flags = 0;
   1626
   1627		ret = xhci_configure_endpoint(xhci, urb->dev, command,
   1628				true, false);
   1629
   1630		/* Clean up the input context for later use by bandwidth
   1631		 * functions.
   1632		 */
   1633		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
   1634command_cleanup:
   1635		kfree(command->completion);
   1636		kfree(command);
   1637	}
   1638	return ret;
   1639}
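
       /*
        * Illustration with hypothetical values: if the output context still
        * records an ep0 max packet size of 64 but the USB core, after reading
        * the first 8 bytes of the device descriptor, now reports 8, the two
        * differ, so the function above patches the input context to 8 and
        * issues an Evaluate Context command.
        */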
   1640
   1641/*
   1642 * non-error returns are a promise to giveback() the urb later
   1643 * we drop ownership so next owner (or urb unlink) can get it
   1644 */
   1645static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
   1646{
   1647	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   1648	unsigned long flags;
   1649	int ret = 0;
   1650	unsigned int slot_id, ep_index;
   1651	unsigned int *ep_state;
   1652	struct urb_priv	*urb_priv;
   1653	int num_tds;
   1654
   1655	if (!urb)
   1656		return -EINVAL;
   1657	ret = xhci_check_args(hcd, urb->dev, urb->ep,
   1658					true, true, __func__);
   1659	if (ret <= 0)
   1660		return ret ? ret : -EINVAL;
   1661
   1662	slot_id = urb->dev->slot_id;
   1663	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
   1664	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
   1665
   1666	if (!HCD_HW_ACCESSIBLE(hcd))
   1667		return -ESHUTDOWN;
   1668
   1669	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
   1670		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
   1671		return -ENODEV;
   1672	}
   1673
   1674	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
   1675		num_tds = urb->number_of_packets;
   1676	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
   1677	    urb->transfer_buffer_length > 0 &&
   1678	    urb->transfer_flags & URB_ZERO_PACKET &&
   1679	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
   1680		num_tds = 2;
   1681	else
   1682		num_tds = 1;
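
       	/*
       	 * Example with hypothetical numbers: a 1024-byte bulk OUT URB with
       	 * URB_ZERO_PACKET set, on an endpoint whose max packet size is 512,
       	 * ends exactly on a packet boundary, so a second TD carrying the
       	 * terminating zero-length packet is needed (num_tds = 2).
       	 */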
   1683
   1684	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
   1685	if (!urb_priv)
   1686		return -ENOMEM;
   1687
   1688	urb_priv->num_tds = num_tds;
   1689	urb_priv->num_tds_done = 0;
   1690	urb->hcpriv = urb_priv;
   1691
   1692	trace_xhci_urb_enqueue(urb);
   1693
   1694	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
   1695		/* Check to see if the max packet size for the default control
   1696		 * endpoint changed during FS device enumeration
   1697		 */
   1698		if (urb->dev->speed == USB_SPEED_FULL) {
   1699			ret = xhci_check_maxpacket(xhci, slot_id,
   1700					ep_index, urb, mem_flags);
   1701			if (ret < 0) {
   1702				xhci_urb_free_priv(urb_priv);
   1703				urb->hcpriv = NULL;
   1704				return ret;
   1705			}
   1706		}
   1707	}
   1708
   1709	spin_lock_irqsave(&xhci->lock, flags);
   1710
   1711	if (xhci->xhc_state & XHCI_STATE_DYING) {
   1712		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
   1713			 urb->ep->desc.bEndpointAddress, urb);
   1714		ret = -ESHUTDOWN;
   1715		goto free_priv;
   1716	}
   1717	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
   1718		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
   1719			  *ep_state);
   1720		ret = -EINVAL;
   1721		goto free_priv;
   1722	}
   1723	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
   1724		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
   1725		ret = -EINVAL;
   1726		goto free_priv;
   1727	}
   1728
   1729	switch (usb_endpoint_type(&urb->ep->desc)) {
   1730
   1731	case USB_ENDPOINT_XFER_CONTROL:
   1732		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
   1733					 slot_id, ep_index);
   1734		break;
   1735	case USB_ENDPOINT_XFER_BULK:
   1736		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
   1737					 slot_id, ep_index);
   1738		break;
   1739	case USB_ENDPOINT_XFER_INT:
   1740		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
   1741				slot_id, ep_index);
   1742		break;
   1743	case USB_ENDPOINT_XFER_ISOC:
   1744		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
   1745				slot_id, ep_index);
   1746	}
   1747
   1748	if (ret) {
   1749free_priv:
   1750		xhci_urb_free_priv(urb_priv);
   1751		urb->hcpriv = NULL;
   1752	}
   1753	spin_unlock_irqrestore(&xhci->lock, flags);
   1754	return ret;
   1755}
   1756
   1757/*
   1758 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
   1759 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
   1760 * should pick up where it left off in the TD, unless a Set Transfer Ring
   1761 * Dequeue Pointer is issued.
   1762 *
   1763 * The TRBs that make up the buffers for the canceled URB will be "removed" from
   1764 * the ring.  Since the ring is a contiguous structure, they can't be physically
   1765 * removed.  Instead, there are three cases to handle:
   1766 *
   1767 *  1) If the HC is in the middle of processing the URB to be canceled, we
   1768 *     simply move the ring's dequeue pointer past those TRBs using the Set
   1769 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
   1770 *     when drivers time out on the last submitted URB and attempt to cancel.
   1771 *
   1772 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
   1773 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
   1774 *     HC will need to invalidate any TRBs it has cached after the stop
   1775 *     endpoint command, as noted in the xHCI 0.95 errata.
   1776 *
   1777 *  3) The TD may have completed by the time the Stop Endpoint Command
   1778 *     completes, so software needs to handle that case too.
   1779 *
   1780 * This function should protect against the TD enqueueing code ringing the
   1781 * doorbell while this code is waiting for a Stop Endpoint command to complete.
   1782 * It also needs to account for multiple cancellations happening at the same
   1783 * time for the same endpoint.
   1784 *
   1785 * Note that this function can be called in any context, or so says
   1786 * usb_hcd_unlink_urb()
   1787 */
   1788static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
   1789{
   1790	unsigned long flags;
   1791	int ret, i;
   1792	u32 temp;
   1793	struct xhci_hcd *xhci;
   1794	struct urb_priv	*urb_priv;
   1795	struct xhci_td *td;
   1796	unsigned int ep_index;
   1797	struct xhci_ring *ep_ring;
   1798	struct xhci_virt_ep *ep;
   1799	struct xhci_command *command;
   1800	struct xhci_virt_device *vdev;
   1801
   1802	xhci = hcd_to_xhci(hcd);
   1803	spin_lock_irqsave(&xhci->lock, flags);
   1804
   1805	trace_xhci_urb_dequeue(urb);
   1806
   1807	/* Make sure the URB hasn't completed or been unlinked already */
   1808	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
   1809	if (ret)
   1810		goto done;
   1811
   1812	/* give back URB now if we can't queue it for cancel */
   1813	vdev = xhci->devs[urb->dev->slot_id];
   1814	urb_priv = urb->hcpriv;
   1815	if (!vdev || !urb_priv)
   1816		goto err_giveback;
   1817
   1818	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
   1819	ep = &vdev->eps[ep_index];
   1820	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
   1821	if (!ep || !ep_ring)
   1822		goto err_giveback;
   1823
   1824	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
   1825	temp = readl(&xhci->op_regs->status);
   1826	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
   1827		xhci_hc_died(xhci);
   1828		goto done;
   1829	}
   1830
   1831	/*
   1832	 * check ring is not re-allocated since URB was enqueued. If it is, then
   1833	 * make sure none of the ring related pointers in this URB private data
   1834	 * are touched, such as td_list, otherwise we overwrite freed data
   1835	 */
   1836	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
   1837		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
   1838		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
   1839			td = &urb_priv->td[i];
   1840			if (!list_empty(&td->cancelled_td_list))
   1841				list_del_init(&td->cancelled_td_list);
   1842		}
   1843		goto err_giveback;
   1844	}
   1845
   1846	if (xhci->xhc_state & XHCI_STATE_HALTED) {
   1847		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
   1848				"HC halted, freeing TD manually.");
   1849		for (i = urb_priv->num_tds_done;
   1850		     i < urb_priv->num_tds;
   1851		     i++) {
   1852			td = &urb_priv->td[i];
   1853			if (!list_empty(&td->td_list))
   1854				list_del_init(&td->td_list);
   1855			if (!list_empty(&td->cancelled_td_list))
   1856				list_del_init(&td->cancelled_td_list);
   1857		}
   1858		goto err_giveback;
   1859	}
   1860
   1861	i = urb_priv->num_tds_done;
   1862	if (i < urb_priv->num_tds)
   1863		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
   1864				"Cancel URB %p, dev %s, ep 0x%x, "
   1865				"starting at offset 0x%llx",
   1866				urb, urb->dev->devpath,
   1867				urb->ep->desc.bEndpointAddress,
   1868				(unsigned long long) xhci_trb_virt_to_dma(
   1869					urb_priv->td[i].start_seg,
   1870					urb_priv->td[i].first_trb));
   1871
   1872	for (; i < urb_priv->num_tds; i++) {
   1873		td = &urb_priv->td[i];
   1874		/* TD can already be on cancelled list if ep halted on it */
   1875		if (list_empty(&td->cancelled_td_list)) {
   1876			td->cancel_status = TD_DIRTY;
   1877			list_add_tail(&td->cancelled_td_list,
   1878				      &ep->cancelled_td_list);
   1879		}
   1880	}
   1881
   1882	/* Queue a stop endpoint command, but only if this is
   1883	 * the first cancellation to be handled.
   1884	 */
   1885	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
   1886		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
   1887		if (!command) {
   1888			ret = -ENOMEM;
   1889			goto done;
   1890		}
   1891		ep->ep_state |= EP_STOP_CMD_PENDING;
   1892		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
   1893					 ep_index, 0);
   1894		xhci_ring_cmd_db(xhci);
   1895	}
   1896done:
   1897	spin_unlock_irqrestore(&xhci->lock, flags);
   1898	return ret;
   1899
   1900err_giveback:
   1901	if (urb_priv)
   1902		xhci_urb_free_priv(urb_priv);
   1903	usb_hcd_unlink_urb_from_ep(hcd, urb);
   1904	spin_unlock_irqrestore(&xhci->lock, flags);
   1905	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
   1906	return ret;
   1907}
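
       /*
        * For reference, a cancellation typically reaches xhci_urb_dequeue()
        * when a class driver gives up on an URB, e.g. (sketch, not taken from
        * this file):
        *
        *	usb_unlink_urb(urb);	(asynchronous unlink)
        *	usb_kill_urb(urb);	(synchronous variant)
        *
        * Both go through usb_hcd_unlink_urb() in the USB core before the
        * xhci_urb_dequeue() hook above runs.
        */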
   1908
   1909/* Drop an endpoint from a new bandwidth configuration for this device.
   1910 * Only one call to this function is allowed per endpoint before
   1911 * check_bandwidth() or reset_bandwidth() must be called.
   1912 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
   1913 * add the endpoint to the schedule with possibly new parameters denoted by a
   1914 * different endpoint descriptor in usb_host_endpoint.
   1915 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
   1916 * not allowed.
   1917 *
   1918 * The USB core will not allow URBs to be queued to an endpoint that is being
   1919 * disabled, so there's no need for mutual exclusion to protect
   1920 * the xhci->devs[slot_id] structure.
   1921 */
   1922int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
   1923		       struct usb_host_endpoint *ep)
   1924{
   1925	struct xhci_hcd *xhci;
   1926	struct xhci_container_ctx *in_ctx, *out_ctx;
   1927	struct xhci_input_control_ctx *ctrl_ctx;
   1928	unsigned int ep_index;
   1929	struct xhci_ep_ctx *ep_ctx;
   1930	u32 drop_flag;
   1931	u32 new_add_flags, new_drop_flags;
   1932	int ret;
   1933
   1934	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
   1935	if (ret <= 0)
   1936		return ret;
   1937	xhci = hcd_to_xhci(hcd);
   1938	if (xhci->xhc_state & XHCI_STATE_DYING)
   1939		return -ENODEV;
   1940
   1941	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
   1942	drop_flag = xhci_get_endpoint_flag(&ep->desc);
   1943	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
   1944		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
   1945				__func__, drop_flag);
   1946		return 0;
   1947	}
   1948
   1949	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
   1950	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
   1951	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
   1952	if (!ctrl_ctx) {
   1953		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   1954				__func__);
   1955		return 0;
   1956	}
   1957
   1958	ep_index = xhci_get_endpoint_index(&ep->desc);
   1959	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
   1960	/* If the HC already knows the endpoint is disabled,
   1961	 * or the HCD has noted it is disabled, ignore this request
   1962	 */
   1963	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
   1964	    le32_to_cpu(ctrl_ctx->drop_flags) &
   1965	    xhci_get_endpoint_flag(&ep->desc)) {
   1966		/* Do not warn when called after a usb_device_reset */
   1967		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
   1968			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
   1969				  __func__, ep);
   1970		return 0;
   1971	}
   1972
   1973	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
   1974	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
   1975
   1976	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
   1977	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
   1978
   1979	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
   1980
   1981	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
   1982
   1983	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
   1984			(unsigned int) ep->desc.bEndpointAddress,
   1985			udev->slot_id,
   1986			(unsigned int) new_drop_flags,
   1987			(unsigned int) new_add_flags);
   1988	return 0;
   1989}
   1990EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
   1991
   1992/* Add an endpoint to a new possible bandwidth configuration for this device.
   1993 * Only one call to this function is allowed per endpoint before
   1994 * check_bandwidth() or reset_bandwidth() must be called.
   1995 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
   1996 * add the endpoint to the schedule with possibly new parameters denoted by a
   1997 * different endpoint descriptor in usb_host_endpoint.
   1998 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
   1999 * not allowed.
   2000 *
   2001 * The USB core will not allow URBs to be queued to an endpoint until the
   2002 * configuration or alt setting is installed in the device, so there's no need
   2003 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
   2004 */
   2005int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
   2006		      struct usb_host_endpoint *ep)
   2007{
   2008	struct xhci_hcd *xhci;
   2009	struct xhci_container_ctx *in_ctx;
   2010	unsigned int ep_index;
   2011	struct xhci_input_control_ctx *ctrl_ctx;
   2012	struct xhci_ep_ctx *ep_ctx;
   2013	u32 added_ctxs;
   2014	u32 new_add_flags, new_drop_flags;
   2015	struct xhci_virt_device *virt_dev;
   2016	int ret = 0;
   2017
   2018	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
   2019	if (ret <= 0) {
   2020		/* So we won't queue a reset ep command for a root hub */
   2021		ep->hcpriv = NULL;
   2022		return ret;
   2023	}
   2024	xhci = hcd_to_xhci(hcd);
   2025	if (xhci->xhc_state & XHCI_STATE_DYING)
   2026		return -ENODEV;
   2027
   2028	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
   2029	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
   2030		/* FIXME when we have to issue an evaluate endpoint command to
   2031		 * deal with ep0 max packet size changing once we get the
   2032		 * descriptors
   2033		 */
   2034		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
   2035				__func__, added_ctxs);
   2036		return 0;
   2037	}
   2038
   2039	virt_dev = xhci->devs[udev->slot_id];
   2040	in_ctx = virt_dev->in_ctx;
   2041	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
   2042	if (!ctrl_ctx) {
   2043		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   2044				__func__);
   2045		return 0;
   2046	}
   2047
   2048	ep_index = xhci_get_endpoint_index(&ep->desc);
   2049	/* If this endpoint is already in use, and the upper layers are trying
   2050	 * to add it again without dropping it, reject the addition.
   2051	 */
   2052	if (virt_dev->eps[ep_index].ring &&
   2053			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
   2054		xhci_warn(xhci, "Trying to add endpoint 0x%x "
   2055				"without dropping it.\n",
   2056				(unsigned int) ep->desc.bEndpointAddress);
   2057		return -EINVAL;
   2058	}
   2059
   2060	/* If the HCD has already noted the endpoint is enabled,
   2061	 * ignore this request.
   2062	 */
   2063	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
   2064		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
   2065				__func__, ep);
   2066		return 0;
   2067	}
   2068
   2069	/*
   2070	 * Configuration and alternate setting changes must be done in
   2071	 * process context, not interrupt context (or so documentation
   2072	 * for usb_set_interface() and usb_set_configuration() claim).
   2073	 */
   2074	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
   2075		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
   2076				__func__, ep->desc.bEndpointAddress);
   2077		return -ENOMEM;
   2078	}
   2079
   2080	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
   2081	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
   2082
   2083	/* If xhci_endpoint_disable() was called for this endpoint, but the
   2084	 * xHC hasn't been notified yet through the check_bandwidth() call,
   2085	 * this re-adds a new state for the endpoint from the new endpoint
   2086	 * descriptors.  We must drop and re-add this endpoint, so we leave the
   2087	 * drop flags alone.
   2088	 */
   2089	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
   2090
   2091	/* Store the usb_device pointer for later use */
   2092	ep->hcpriv = udev;
   2093
   2094	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
   2095	trace_xhci_add_endpoint(ep_ctx);
   2096
   2097	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
   2098			(unsigned int) ep->desc.bEndpointAddress,
   2099			udev->slot_id,
   2100			(unsigned int) new_drop_flags,
   2101			(unsigned int) new_add_flags);
   2102	return 0;
   2103}
   2104EXPORT_SYMBOL_GPL(xhci_add_endpoint);
   2105
   2106static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
   2107{
   2108	struct xhci_input_control_ctx *ctrl_ctx;
   2109	struct xhci_ep_ctx *ep_ctx;
   2110	struct xhci_slot_ctx *slot_ctx;
   2111	int i;
   2112
   2113	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
   2114	if (!ctrl_ctx) {
   2115		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   2116				__func__);
   2117		return;
   2118	}
   2119
   2120	/* When a device's add flag and drop flag are zero, any subsequent
   2121	 * configure endpoint command will leave that endpoint's state
   2122	 * untouched.  Make sure we don't leave any old state in the input
   2123	 * endpoint contexts.
   2124	 */
   2125	ctrl_ctx->drop_flags = 0;
   2126	ctrl_ctx->add_flags = 0;
   2127	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
   2128	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
   2129	/* Endpoint 0 is always valid */
   2130	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
   2131	for (i = 1; i < 31; i++) {
   2132		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
   2133		ep_ctx->ep_info = 0;
   2134		ep_ctx->ep_info2 = 0;
   2135		ep_ctx->deq = 0;
   2136		ep_ctx->tx_info = 0;
   2137	}
   2138}
   2139
   2140static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
   2141		struct usb_device *udev, u32 *cmd_status)
   2142{
   2143	int ret;
   2144
   2145	switch (*cmd_status) {
   2146	case COMP_COMMAND_ABORTED:
   2147	case COMP_COMMAND_RING_STOPPED:
   2148		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
   2149		ret = -ETIME;
   2150		break;
   2151	case COMP_RESOURCE_ERROR:
   2152		dev_warn(&udev->dev,
   2153			 "Not enough host controller resources for new device state.\n");
   2154		ret = -ENOMEM;
   2155		/* FIXME: can we allocate more resources for the HC? */
   2156		break;
   2157	case COMP_BANDWIDTH_ERROR:
   2158	case COMP_SECONDARY_BANDWIDTH_ERROR:
   2159		dev_warn(&udev->dev,
   2160			 "Not enough bandwidth for new device state.\n");
   2161		ret = -ENOSPC;
   2162		/* FIXME: can we go back to the old state? */
   2163		break;
   2164	case COMP_TRB_ERROR:
   2165		/* the HCD set up something wrong */
   2166		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
   2167				"add flag = 1, "
   2168				"and endpoint is not disabled.\n");
   2169		ret = -EINVAL;
   2170		break;
   2171	case COMP_INCOMPATIBLE_DEVICE_ERROR:
   2172		dev_warn(&udev->dev,
   2173			 "ERROR: Incompatible device for endpoint configure command.\n");
   2174		ret = -ENODEV;
   2175		break;
   2176	case COMP_SUCCESS:
   2177		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
   2178				"Successful Endpoint Configure command");
   2179		ret = 0;
   2180		break;
   2181	default:
   2182		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
   2183				*cmd_status);
   2184		ret = -EINVAL;
   2185		break;
   2186	}
   2187	return ret;
   2188}
   2189
   2190static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
   2191		struct usb_device *udev, u32 *cmd_status)
   2192{
   2193	int ret;
   2194
   2195	switch (*cmd_status) {
   2196	case COMP_COMMAND_ABORTED:
   2197	case COMP_COMMAND_RING_STOPPED:
   2198		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
   2199		ret = -ETIME;
   2200		break;
   2201	case COMP_PARAMETER_ERROR:
   2202		dev_warn(&udev->dev,
   2203			 "WARN: xHCI driver setup invalid evaluate context command.\n");
   2204		ret = -EINVAL;
   2205		break;
   2206	case COMP_SLOT_NOT_ENABLED_ERROR:
   2207		dev_warn(&udev->dev,
   2208			"WARN: slot not enabled for evaluate context command.\n");
   2209		ret = -EINVAL;
   2210		break;
   2211	case COMP_CONTEXT_STATE_ERROR:
   2212		dev_warn(&udev->dev,
   2213			"WARN: invalid context state for evaluate context command.\n");
   2214		ret = -EINVAL;
   2215		break;
   2216	case COMP_INCOMPATIBLE_DEVICE_ERROR:
   2217		dev_warn(&udev->dev,
   2218			"ERROR: Incompatible device for evaluate context command.\n");
   2219		ret = -ENODEV;
   2220		break;
   2221	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
   2222		/* Max Exit Latency too large error */
   2223		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
   2224		ret = -EINVAL;
   2225		break;
   2226	case COMP_SUCCESS:
   2227		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
   2228				"Successful evaluate context command");
   2229		ret = 0;
   2230		break;
   2231	default:
   2232		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
   2233			*cmd_status);
   2234		ret = -EINVAL;
   2235		break;
   2236	}
   2237	return ret;
   2238}
   2239
   2240static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
   2241		struct xhci_input_control_ctx *ctrl_ctx)
   2242{
   2243	u32 valid_add_flags;
   2244	u32 valid_drop_flags;
   2245
   2246	/* Ignore the slot flag (bit 0), and the default control endpoint flag
   2247	 * (bit 1).  The default control endpoint is added during the Address
   2248	 * Device command and is never removed until the slot is disabled.
   2249	 */
   2250	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
   2251	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
   2252
   2253	/* Use hweight32 to count the number of ones in the add flags, or
   2254	 * number of endpoints added.  Don't count endpoints that are changed
   2255	 * (both added and dropped).
   2256	 */
   2257	return hweight32(valid_add_flags) -
   2258		hweight32(valid_add_flags & valid_drop_flags);
   2259}
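
       /*
        * Worked example with hypothetical flag values: with
        * add_flags = 0b11100 and drop_flags = 0b10000, shifting out the slot
        * and ep0 bits leaves valid_add_flags = 0b111 and
        * valid_drop_flags = 0b100.  One endpoint is both added and dropped
        * (i.e. changed), so 3 - 1 = 2 new endpoints are counted here, and
        * xhci_count_num_dropped_endpoints() below would count 1 - 1 = 0
        * dropped endpoints.
        */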
   2260
   2261static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
   2262		struct xhci_input_control_ctx *ctrl_ctx)
   2263{
   2264	u32 valid_add_flags;
   2265	u32 valid_drop_flags;
   2266
   2267	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
   2268	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
   2269
   2270	return hweight32(valid_drop_flags) -
   2271		hweight32(valid_add_flags & valid_drop_flags);
   2272}
   2273
   2274/*
   2275 * We need to reserve the new number of endpoints before the configure endpoint
   2276 * command completes.  We can't subtract the dropped endpoints from the number
   2277 * of active endpoints until the command completes because we can oversubscribe
   2278 * the host in this case:
   2279 *
   2280 *  - the first configure endpoint command drops more endpoints than it adds
   2281 *  - a second configure endpoint command that adds more endpoints is queued
   2282 *  - the first configure endpoint command fails, so the config is unchanged
   2283 *  - the second command may succeed, even though there aren't enough resources
   2284 *
   2285 * Must be called with xhci->lock held.
   2286 */
   2287static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
   2288		struct xhci_input_control_ctx *ctrl_ctx)
   2289{
   2290	u32 added_eps;
   2291
   2292	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
   2293	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
   2294		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2295				"Not enough ep ctxs: "
   2296				"%u active, need to add %u, limit is %u.",
   2297				xhci->num_active_eps, added_eps,
   2298				xhci->limit_active_eps);
   2299		return -ENOMEM;
   2300	}
   2301	xhci->num_active_eps += added_eps;
   2302	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2303			"Adding %u ep ctxs, %u now active.", added_eps,
   2304			xhci->num_active_eps);
   2305	return 0;
   2306}
   2307
   2308/*
   2309 * The configure endpoint was failed by the xHC for some other reason, so we
   2310 * need to revert the resources that failed configuration would have used.
   2311 *
   2312 * Must be called with xhci->lock held.
   2313 */
   2314static void xhci_free_host_resources(struct xhci_hcd *xhci,
   2315		struct xhci_input_control_ctx *ctrl_ctx)
   2316{
   2317	u32 num_failed_eps;
   2318
   2319	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
   2320	xhci->num_active_eps -= num_failed_eps;
   2321	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2322			"Removing %u failed ep ctxs, %u now active.",
   2323			num_failed_eps,
   2324			xhci->num_active_eps);
   2325}
   2326
   2327/*
   2328 * Now that the command has completed, clean up the active endpoint count by
   2329 * subtracting out the endpoints that were dropped (but not changed).
   2330 *
   2331 * Must be called with xhci->lock held.
   2332 */
   2333static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
   2334		struct xhci_input_control_ctx *ctrl_ctx)
   2335{
   2336	u32 num_dropped_eps;
   2337
   2338	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
   2339	xhci->num_active_eps -= num_dropped_eps;
   2340	if (num_dropped_eps)
   2341		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2342				"Removing %u dropped ep ctxs, %u now active.",
   2343				num_dropped_eps,
   2344				xhci->num_active_eps);
   2345}
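
       /*
        * A sketch of the bookkeeping above with made-up numbers: with
        * limit_active_eps = 64 and num_active_eps = 60, a configure endpoint
        * command that adds 3 endpoints and drops 8 first reserves the 3
        * additions, raising num_active_eps to 63.  If the command fails,
        * xhci_free_host_resources() backs those 3 out again; if it succeeds,
        * xhci_finish_resource_reservation() subtracts the 8 drops, leaving 55.
        */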
   2346
   2347static unsigned int xhci_get_block_size(struct usb_device *udev)
   2348{
   2349	switch (udev->speed) {
   2350	case USB_SPEED_LOW:
   2351	case USB_SPEED_FULL:
   2352		return FS_BLOCK;
   2353	case USB_SPEED_HIGH:
   2354		return HS_BLOCK;
   2355	case USB_SPEED_SUPER:
   2356	case USB_SPEED_SUPER_PLUS:
   2357		return SS_BLOCK;
   2358	case USB_SPEED_UNKNOWN:
   2359	case USB_SPEED_WIRELESS:
   2360	default:
   2361		/* Should never happen */
   2362		return 1;
   2363	}
   2364}
   2365
   2366static unsigned int
   2367xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
   2368{
   2369	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
   2370		return LS_OVERHEAD;
   2371	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
   2372		return FS_OVERHEAD;
   2373	return HS_OVERHEAD;
   2374}
   2375
   2376/* If we are changing a LS/FS device under a HS hub,
   2377 * make sure (if we are activating a new TT) that the HS bus has enough
   2378 * bandwidth for this new TT.
   2379 */
   2380static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
   2381		struct xhci_virt_device *virt_dev,
   2382		int old_active_eps)
   2383{
   2384	struct xhci_interval_bw_table *bw_table;
   2385	struct xhci_tt_bw_info *tt_info;
   2386
   2387	/* Find the bandwidth table for the root port this TT is attached to. */
   2388	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
   2389	tt_info = virt_dev->tt_info;
   2390	/* If this TT already had active endpoints, the bandwidth for this TT
   2391	 * has already been added.  Removing all periodic endpoints (and thus
   2392	 * making the TT inactive) will only decrease the bandwidth used.
   2393	 */
   2394	if (old_active_eps)
   2395		return 0;
   2396	if (old_active_eps == 0 && tt_info->active_eps != 0) {
   2397		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
   2398			return -ENOMEM;
   2399		return 0;
   2400	}
   2401	/* Not sure why we would have no new active endpoints...
   2402	 *
   2403	 * Maybe because of an Evaluate Context change for a hub update or a
   2404	 * control endpoint 0 max packet size change?
   2405	 * FIXME: skip the bandwidth calculation in that case.
   2406	 */
   2407	return 0;
   2408}
   2409
   2410static int xhci_check_ss_bw(struct xhci_hcd *xhci,
   2411		struct xhci_virt_device *virt_dev)
   2412{
   2413	unsigned int bw_reserved;
   2414
   2415	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
   2416	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
   2417		return -ENOMEM;
   2418
   2419	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
   2420	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
   2421		return -ENOMEM;
   2422
   2423	return 0;
   2424}
   2425
   2426/*
   2427 * This algorithm is a very conservative estimate of the worst-case scheduling
   2428 * scenario for any one interval.  The hardware dynamically schedules the
   2429 * packets, so we can't tell which microframe could be the limiting factor in
   2430 * the bandwidth scheduling.  This only takes into account periodic endpoints.
   2431 *
   2432 * Obviously, we can't solve an NP complete problem to find the minimum worst
   2433 * case scenario.  Instead, we come up with an estimate that is no less than
   2434 * the worst case bandwidth used for any one microframe, but may be an
   2435 * over-estimate.
   2436 *
   2437 * We walk the requirements for each endpoint by interval, starting with the
   2438 * smallest interval, and place packets in the schedule where there is only one
   2439 * possible way to schedule packets for that interval.  In order to simplify
   2440 * this algorithm, we record the largest max packet size for each interval, and
   2441 * assume all packets will be that size.
   2442 *
   2443 * For interval 0, we obviously must schedule all packets for each interval.
   2444 * The bandwidth for interval 0 is just the amount of data to be transmitted
   2445 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
   2446 * the number of packets).
   2447 *
   2448 * For interval 1, we have two possible microframes to schedule those packets
   2449 * in.  For this algorithm, if we can schedule the same number of packets for
   2450 * each possible scheduling opportunity (each microframe), we will do so.  The
   2451 * remaining number of packets will be saved to be transmitted in the gaps in
   2452 * the next interval's scheduling sequence.
   2453 *
   2454 * As we move those remaining packets to be scheduled with interval 2 packets,
   2455 * we have to double the number of remaining packets to transmit.  This is
   2456 * because the intervals are actually powers of 2, and we would be transmitting
   2457 * the previous interval's packets twice in this interval.  We also have to be
   2458 * sure that when we look at the largest max packet size for this interval, we
   2459 * also look at the largest max packet size for the remaining packets and take
   2460 * the greater of the two.
   2461 *
   2462 * The algorithm continues to evenly distribute packets in each scheduling
   2463 * opportunity, and push the remaining packets out, until we get to the last
   2464 * interval.  Then those packets and their associated overhead are just added
   2465 * to the bandwidth used.
   2466 */
   2467static int xhci_check_bw_table(struct xhci_hcd *xhci,
   2468		struct xhci_virt_device *virt_dev,
   2469		int old_active_eps)
   2470{
   2471	unsigned int bw_reserved;
   2472	unsigned int max_bandwidth;
   2473	unsigned int bw_used;
   2474	unsigned int block_size;
   2475	struct xhci_interval_bw_table *bw_table;
   2476	unsigned int packet_size = 0;
   2477	unsigned int overhead = 0;
   2478	unsigned int packets_transmitted = 0;
   2479	unsigned int packets_remaining = 0;
   2480	unsigned int i;
   2481
   2482	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
   2483		return xhci_check_ss_bw(xhci, virt_dev);
   2484
   2485	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
   2486		max_bandwidth = HS_BW_LIMIT;
   2487		/* Convert percent of bus BW reserved to blocks reserved */
   2488		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
   2489	} else {
   2490		max_bandwidth = FS_BW_LIMIT;
   2491		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
   2492	}
   2493
   2494	bw_table = virt_dev->bw_table;
   2495	/* We need to translate the max packet size and max ESIT payloads into
   2496	 * the units the hardware uses.
   2497	 */
   2498	block_size = xhci_get_block_size(virt_dev->udev);
   2499
   2500	/* If we are manipulating a LS/FS device under a HS hub, double check
   2501	 * that the HS bus has enough bandwidth if we are activating a new TT.
   2502	 */
   2503	if (virt_dev->tt_info) {
   2504		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2505				"Recalculating BW for rootport %u",
   2506				virt_dev->real_port);
   2507		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
   2508			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
   2509					"newly activated TT.\n");
   2510			return -ENOMEM;
   2511		}
   2512		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2513				"Recalculating BW for TT slot %u port %u",
   2514				virt_dev->tt_info->slot_id,
   2515				virt_dev->tt_info->ttport);
   2516	} else {
   2517		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2518				"Recalculating BW for rootport %u",
   2519				virt_dev->real_port);
   2520	}
   2521
   2522	/* Add in how much bandwidth will be used for interval zero, or the
   2523	 * rounded max ESIT payload + number of packets * largest overhead.
   2524	 */
   2525	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
   2526		bw_table->interval_bw[0].num_packets *
   2527		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
   2528
   2529	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
   2530		unsigned int bw_added;
   2531		unsigned int largest_mps;
   2532		unsigned int interval_overhead;
   2533
   2534		/*
   2535		 * How many packets could we transmit in this interval?
   2536		 * If packets didn't fit in the previous interval, we will need
   2537		 * to transmit that many packets twice within this interval.
   2538		 */
   2539		packets_remaining = 2 * packets_remaining +
   2540			bw_table->interval_bw[i].num_packets;
   2541
   2542		/* Find the largest max packet size of this or the previous
   2543		 * interval.
   2544		 */
   2545		if (list_empty(&bw_table->interval_bw[i].endpoints))
   2546			largest_mps = 0;
   2547		else {
   2548			struct xhci_virt_ep *virt_ep;
   2549			struct list_head *ep_entry;
   2550
   2551			ep_entry = bw_table->interval_bw[i].endpoints.next;
   2552			virt_ep = list_entry(ep_entry,
   2553					struct xhci_virt_ep, bw_endpoint_list);
   2554			/* Convert to blocks, rounding up */
   2555			largest_mps = DIV_ROUND_UP(
   2556					virt_ep->bw_info.max_packet_size,
   2557					block_size);
   2558		}
   2559		if (largest_mps > packet_size)
   2560			packet_size = largest_mps;
   2561
   2562		/* Use the larger overhead of this or the previous interval. */
   2563		interval_overhead = xhci_get_largest_overhead(
   2564				&bw_table->interval_bw[i]);
   2565		if (interval_overhead > overhead)
   2566			overhead = interval_overhead;
   2567
   2568		/* How many packets can we evenly distribute across
   2569		 * (1 << (i + 1)) possible scheduling opportunities?
   2570		 */
   2571		packets_transmitted = packets_remaining >> (i + 1);
   2572
   2573		/* Add in the bandwidth used for those scheduled packets */
   2574		bw_added = packets_transmitted * (overhead + packet_size);
   2575
   2576		/* How many packets do we have remaining to transmit? */
   2577		packets_remaining = packets_remaining % (1 << (i + 1));
   2578
   2579		/* What largest max packet size should those packets have? */
   2580		/* If we've transmitted all packets, don't carry over the
   2581		 * largest packet size.
   2582		 */
   2583		if (packets_remaining == 0) {
   2584			packet_size = 0;
   2585			overhead = 0;
   2586		} else if (packets_transmitted > 0) {
   2587			/* Otherwise if we do have remaining packets, and we've
   2588			 * scheduled some packets in this interval, take the
   2589			 * largest max packet size from endpoints with this
   2590			 * interval.
   2591			 */
   2592			packet_size = largest_mps;
   2593			overhead = interval_overhead;
   2594		}
   2595		/* Otherwise carry over packet_size and overhead from the last
   2596		 * time we had a remainder.
   2597		 */
   2598		bw_used += bw_added;
   2599		if (bw_used > max_bandwidth) {
   2600			xhci_warn(xhci, "Not enough bandwidth. "
   2601					"Proposed: %u, Max: %u\n",
   2602				bw_used, max_bandwidth);
   2603			return -ENOMEM;
   2604		}
   2605	}
   2606	/*
   2607	 * Ok, we know we have some packets left over after even-handedly
   2608	 * scheduling interval 15.  We don't know which microframes they will
   2609	 * fit into, so we over-schedule and say they will be scheduled every
   2610	 * microframe.
   2611	 */
   2612	if (packets_remaining > 0)
   2613		bw_used += overhead + packet_size;
   2614
   2615	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
   2616		unsigned int port_index = virt_dev->real_port - 1;
   2617
   2618		/* OK, we're manipulating a HS device attached to a
   2619		 * root port bandwidth domain.  Include the number of active TTs
   2620		 * in the bandwidth used.
   2621		 */
   2622		bw_used += TT_HS_OVERHEAD *
   2623			xhci->rh_bw[port_index].num_active_tts;
   2624	}
   2625
   2626	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   2627		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
   2628		"Available: %u " "percent",
   2629		bw_used, max_bandwidth, bw_reserved,
   2630		(max_bandwidth - bw_used - bw_reserved) * 100 /
   2631		max_bandwidth);
   2632
   2633	bw_used += bw_reserved;
   2634	if (bw_used > max_bandwidth) {
   2635		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
   2636				bw_used, max_bandwidth);
   2637		return -ENOMEM;
   2638	}
   2639
   2640	bw_table->bw_used = bw_used;
   2641	return 0;
   2642}
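
       /*
        * A short trace of the scheduling loop above, with made-up numbers:
        * suppose interval index 1 holds 5 packets and the later intervals
        * hold none.  At i = 1 there are 1 << 2 = 4 scheduling opportunities,
        * so 5 >> 2 = 1 packet is scheduled (its bandwidth added to bw_used)
        * and 5 % 4 = 1 packet is carried over.  At i = 2 the carry doubles to
        * 2 packets against 8 opportunities, so nothing more is scheduled and
        * the carry keeps doubling until the loop ends; the leftover is then
        * charged once as overhead + packet_size after the loop.
        */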
   2643
   2644static bool xhci_is_async_ep(unsigned int ep_type)
   2645{
   2646	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
   2647					ep_type != ISOC_IN_EP &&
   2648					ep_type != INT_IN_EP);
   2649}
   2650
   2651static bool xhci_is_sync_in_ep(unsigned int ep_type)
   2652{
   2653	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
   2654}
   2655
   2656static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
   2657{
   2658	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
   2659
   2660	if (ep_bw->ep_interval == 0)
   2661		return SS_OVERHEAD_BURST +
   2662			(ep_bw->mult * ep_bw->num_packets *
   2663					(SS_OVERHEAD + mps));
   2664	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
   2665				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
   2666				1 << ep_bw->ep_interval);
   2667
   2668}
   2669
   2670static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
   2671		struct xhci_bw_info *ep_bw,
   2672		struct xhci_interval_bw_table *bw_table,
   2673		struct usb_device *udev,
   2674		struct xhci_virt_ep *virt_ep,
   2675		struct xhci_tt_bw_info *tt_info)
   2676{
   2677	struct xhci_interval_bw	*interval_bw;
   2678	int normalized_interval;
   2679
   2680	if (xhci_is_async_ep(ep_bw->type))
   2681		return;
   2682
   2683	if (udev->speed >= USB_SPEED_SUPER) {
   2684		if (xhci_is_sync_in_ep(ep_bw->type))
   2685			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
   2686				xhci_get_ss_bw_consumed(ep_bw);
   2687		else
   2688			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
   2689				xhci_get_ss_bw_consumed(ep_bw);
   2690		return;
   2691	}
   2692
   2693	/* SuperSpeed endpoints never get added to intervals in the table, so
   2694	 * this check is only valid for HS/FS/LS devices.
   2695	 */
   2696	if (list_empty(&virt_ep->bw_endpoint_list))
   2697		return;
   2698	/* For LS/FS devices, we need to translate the interval expressed in
   2699	 * microframes to frames.
   2700	 */
   2701	if (udev->speed == USB_SPEED_HIGH)
   2702		normalized_interval = ep_bw->ep_interval;
   2703	else
   2704		normalized_interval = ep_bw->ep_interval - 3;
   2705
   2706	if (normalized_interval == 0)
   2707		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
   2708	interval_bw = &bw_table->interval_bw[normalized_interval];
   2709	interval_bw->num_packets -= ep_bw->num_packets;
   2710	switch (udev->speed) {
   2711	case USB_SPEED_LOW:
   2712		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
   2713		break;
   2714	case USB_SPEED_FULL:
   2715		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
   2716		break;
   2717	case USB_SPEED_HIGH:
   2718		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
   2719		break;
   2720	case USB_SPEED_SUPER:
   2721	case USB_SPEED_SUPER_PLUS:
   2722	case USB_SPEED_UNKNOWN:
   2723	case USB_SPEED_WIRELESS:
   2724		/* Should never happen because only LS/FS/HS endpoints will get
   2725		 * added to the endpoint list.
   2726		 */
   2727		return;
   2728	}
   2729	if (tt_info)
   2730		tt_info->active_eps -= 1;
   2731	list_del_init(&virt_ep->bw_endpoint_list);
   2732}
   2733
   2734static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
   2735		struct xhci_bw_info *ep_bw,
   2736		struct xhci_interval_bw_table *bw_table,
   2737		struct usb_device *udev,
   2738		struct xhci_virt_ep *virt_ep,
   2739		struct xhci_tt_bw_info *tt_info)
   2740{
   2741	struct xhci_interval_bw	*interval_bw;
   2742	struct xhci_virt_ep *smaller_ep;
   2743	int normalized_interval;
   2744
   2745	if (xhci_is_async_ep(ep_bw->type))
   2746		return;
   2747
   2748	if (udev->speed >= USB_SPEED_SUPER) {
   2749		if (xhci_is_sync_in_ep(ep_bw->type))
   2750			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
   2751				xhci_get_ss_bw_consumed(ep_bw);
   2752		else
   2753			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
   2754				xhci_get_ss_bw_consumed(ep_bw);
   2755		return;
   2756	}
   2757
   2758	/* For LS/FS devices, we need to translate the interval expressed in
   2759	 * microframes to frames.
   2760	 */
   2761	if (udev->speed == USB_SPEED_HIGH)
   2762		normalized_interval = ep_bw->ep_interval;
   2763	else
   2764		normalized_interval = ep_bw->ep_interval - 3;
   2765
   2766	if (normalized_interval == 0)
   2767		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
   2768	interval_bw = &bw_table->interval_bw[normalized_interval];
   2769	interval_bw->num_packets += ep_bw->num_packets;
   2770	switch (udev->speed) {
   2771	case USB_SPEED_LOW:
   2772		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
   2773		break;
   2774	case USB_SPEED_FULL:
   2775		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
   2776		break;
   2777	case USB_SPEED_HIGH:
   2778		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
   2779		break;
   2780	case USB_SPEED_SUPER:
   2781	case USB_SPEED_SUPER_PLUS:
   2782	case USB_SPEED_UNKNOWN:
   2783	case USB_SPEED_WIRELESS:
   2784		/* Should never happen because only LS/FS/HS endpoints will get
   2785		 * added to the endpoint list.
   2786		 */
   2787		return;
   2788	}
   2789
   2790	if (tt_info)
   2791		tt_info->active_eps += 1;
   2792	/* Insert the endpoint into the list, largest max packet size first. */
   2793	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
   2794			bw_endpoint_list) {
   2795		if (ep_bw->max_packet_size >=
   2796				smaller_ep->bw_info.max_packet_size) {
   2797			/* Add the new ep before the smaller endpoint */
   2798			list_add_tail(&virt_ep->bw_endpoint_list,
   2799					&smaller_ep->bw_endpoint_list);
   2800			return;
   2801		}
   2802	}
   2803	/* Add the new endpoint at the end of the list. */
   2804	list_add_tail(&virt_ep->bw_endpoint_list,
   2805			&interval_bw->endpoints);
   2806}
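
       /*
        * Interval normalization, with an illustrative value: a full-speed
        * endpoint whose xHCI interval exponent is 3 services every
        * 2^3 = 8 microframes, i.e. once per frame, so it normalizes to
        * interval table index 0; an exponent of 5 (every 4 frames) lands in
        * index 2.  High-speed endpoints are already expressed in microframes
        * and keep their exponent unchanged.
        */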
   2807
   2808void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
   2809		struct xhci_virt_device *virt_dev,
   2810		int old_active_eps)
   2811{
   2812	struct xhci_root_port_bw_info *rh_bw_info;
   2813	if (!virt_dev->tt_info)
   2814		return;
   2815
   2816	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
   2817	if (old_active_eps == 0 &&
   2818				virt_dev->tt_info->active_eps != 0) {
   2819		rh_bw_info->num_active_tts += 1;
   2820		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
   2821	} else if (old_active_eps != 0 &&
   2822				virt_dev->tt_info->active_eps == 0) {
   2823		rh_bw_info->num_active_tts -= 1;
   2824		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
   2825	}
   2826}
   2827
   2828static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
   2829		struct xhci_virt_device *virt_dev,
   2830		struct xhci_container_ctx *in_ctx)
   2831{
   2832	struct xhci_bw_info ep_bw_info[31];
   2833	int i;
   2834	struct xhci_input_control_ctx *ctrl_ctx;
   2835	int old_active_eps = 0;
   2836
   2837	if (virt_dev->tt_info)
   2838		old_active_eps = virt_dev->tt_info->active_eps;
   2839
   2840	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
   2841	if (!ctrl_ctx) {
   2842		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   2843				__func__);
   2844		return -ENOMEM;
   2845	}
   2846
   2847	for (i = 0; i < 31; i++) {
   2848		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
   2849			continue;
   2850
   2851		/* Make a copy of the BW info in case we need to revert this */
   2852		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
   2853				sizeof(ep_bw_info[i]));
   2854		/* Drop the endpoint from the interval table if the endpoint is
   2855		 * being dropped or changed.
   2856		 */
   2857		if (EP_IS_DROPPED(ctrl_ctx, i))
   2858			xhci_drop_ep_from_interval_table(xhci,
   2859					&virt_dev->eps[i].bw_info,
   2860					virt_dev->bw_table,
   2861					virt_dev->udev,
   2862					&virt_dev->eps[i],
   2863					virt_dev->tt_info);
   2864	}
   2865	/* Overwrite the information stored in the endpoints' bw_info */
   2866	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
   2867	for (i = 0; i < 31; i++) {
   2868		/* Add any changed or added endpoints to the interval table */
   2869		if (EP_IS_ADDED(ctrl_ctx, i))
   2870			xhci_add_ep_to_interval_table(xhci,
   2871					&virt_dev->eps[i].bw_info,
   2872					virt_dev->bw_table,
   2873					virt_dev->udev,
   2874					&virt_dev->eps[i],
   2875					virt_dev->tt_info);
   2876	}
   2877
   2878	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
   2879		/* Ok, this fits in the bandwidth we have.
   2880		 * Update the number of active TTs.
   2881		 */
   2882		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
   2883		return 0;
   2884	}
   2885
   2886	/* We don't have enough bandwidth for this, revert the stored info. */
   2887	for (i = 0; i < 31; i++) {
   2888		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
   2889			continue;
   2890
   2891		/* Drop the new copies of any added or changed endpoints from
   2892		 * the interval table.
   2893		 */
   2894		if (EP_IS_ADDED(ctrl_ctx, i)) {
   2895			xhci_drop_ep_from_interval_table(xhci,
   2896					&virt_dev->eps[i].bw_info,
   2897					virt_dev->bw_table,
   2898					virt_dev->udev,
   2899					&virt_dev->eps[i],
   2900					virt_dev->tt_info);
   2901		}
   2902		/* Revert the endpoint back to its old information */
   2903		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
   2904				sizeof(ep_bw_info[i]));
   2905		/* Add any changed or dropped endpoints back into the table */
   2906		if (EP_IS_DROPPED(ctrl_ctx, i))
   2907			xhci_add_ep_to_interval_table(xhci,
   2908					&virt_dev->eps[i].bw_info,
   2909					virt_dev->bw_table,
   2910					virt_dev->udev,
   2911					&virt_dev->eps[i],
   2912					virt_dev->tt_info);
   2913	}
   2914	return -ENOMEM;
   2915}
   2916
   2917
   2918/* Issue a configure endpoint command or evaluate context command
   2919 * and wait for it to finish.
   2920 */
   2921static int xhci_configure_endpoint(struct xhci_hcd *xhci,
   2922		struct usb_device *udev,
   2923		struct xhci_command *command,
   2924		bool ctx_change, bool must_succeed)
   2925{
   2926	int ret;
   2927	unsigned long flags;
   2928	struct xhci_input_control_ctx *ctrl_ctx;
   2929	struct xhci_virt_device *virt_dev;
   2930	struct xhci_slot_ctx *slot_ctx;
   2931
   2932	if (!command)
   2933		return -EINVAL;
   2934
   2935	spin_lock_irqsave(&xhci->lock, flags);
   2936
   2937	if (xhci->xhc_state & XHCI_STATE_DYING) {
   2938		spin_unlock_irqrestore(&xhci->lock, flags);
   2939		return -ESHUTDOWN;
   2940	}
   2941
   2942	virt_dev = xhci->devs[udev->slot_id];
   2943
   2944	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
   2945	if (!ctrl_ctx) {
   2946		spin_unlock_irqrestore(&xhci->lock, flags);
   2947		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   2948				__func__);
   2949		return -ENOMEM;
   2950	}
   2951
   2952	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
   2953			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
   2954		spin_unlock_irqrestore(&xhci->lock, flags);
   2955		xhci_warn(xhci, "Not enough host resources, "
   2956				"active endpoint contexts = %u\n",
   2957				xhci->num_active_eps);
   2958		return -ENOMEM;
   2959	}
   2960	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
   2961	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
   2962		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
   2963			xhci_free_host_resources(xhci, ctrl_ctx);
   2964		spin_unlock_irqrestore(&xhci->lock, flags);
   2965		xhci_warn(xhci, "Not enough bandwidth\n");
   2966		return -ENOMEM;
   2967	}
   2968
   2969	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
   2970
   2971	trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
   2972	trace_xhci_configure_endpoint(slot_ctx);
   2973
   2974	if (!ctx_change)
   2975		ret = xhci_queue_configure_endpoint(xhci, command,
   2976				command->in_ctx->dma,
   2977				udev->slot_id, must_succeed);
   2978	else
   2979		ret = xhci_queue_evaluate_context(xhci, command,
   2980				command->in_ctx->dma,
   2981				udev->slot_id, must_succeed);
   2982	if (ret < 0) {
   2983		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
   2984			xhci_free_host_resources(xhci, ctrl_ctx);
   2985		spin_unlock_irqrestore(&xhci->lock, flags);
   2986		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
   2987				"FIXME allocate a new ring segment");
   2988		return -ENOMEM;
   2989	}
   2990	xhci_ring_cmd_db(xhci);
   2991	spin_unlock_irqrestore(&xhci->lock, flags);
   2992
   2993	/* Wait for the configure endpoint command to complete */
   2994	wait_for_completion(command->completion);
   2995
   2996	if (!ctx_change)
   2997		ret = xhci_configure_endpoint_result(xhci, udev,
   2998						     &command->status);
   2999	else
   3000		ret = xhci_evaluate_context_result(xhci, udev,
   3001						   &command->status);
   3002
   3003	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
   3004		spin_lock_irqsave(&xhci->lock, flags);
   3005		/* If the command failed, remove the reserved resources.
   3006		 * Otherwise, clean up the estimate to include dropped eps.
   3007		 */
   3008		if (ret)
   3009			xhci_free_host_resources(xhci, ctrl_ctx);
   3010		else
   3011			xhci_finish_resource_reservation(xhci, ctrl_ctx);
   3012		spin_unlock_irqrestore(&xhci->lock, flags);
   3013	}
   3014	return ret;
   3015}
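
       /*
        * The usual calling pattern for xhci_configure_endpoint(), as used by
        * xhci_check_maxpacket() above and xhci_check_bandwidth() below
        * (sketch only):
        *
        *	command = xhci_alloc_command(xhci, true, mem_flags);
        *	if (!command)
        *		return -ENOMEM;
        *	command->in_ctx = virt_dev->in_ctx;
        *	(set add_flags/drop_flags in the input control context)
        *	ret = xhci_configure_endpoint(xhci, udev, command, ctx_change, false);
        *	kfree(command->completion);
        *	kfree(command);
        *
        * ctx_change selects an Evaluate Context command instead of a
        * Configure Endpoint command.
        */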
   3016
   3017static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
   3018	struct xhci_virt_device *vdev, int i)
   3019{
   3020	struct xhci_virt_ep *ep = &vdev->eps[i];
   3021
   3022	if (ep->ep_state & EP_HAS_STREAMS) {
   3023		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
   3024				xhci_get_endpoint_address(i));
   3025		xhci_free_stream_info(xhci, ep->stream_info);
   3026		ep->stream_info = NULL;
   3027		ep->ep_state &= ~EP_HAS_STREAMS;
   3028	}
   3029}
   3030
   3031/* Called after one or more calls to xhci_add_endpoint() or
   3032 * xhci_drop_endpoint().  If this call fails, the USB core is expected
   3033 * to call xhci_reset_bandwidth().
   3034 *
   3035 * Since we are in the middle of changing either configuration or
   3036 * installing a new alt setting, the USB core won't allow URBs to be
   3037 * enqueued for any endpoint on the old config or interface.  Nothing
   3038 * else should be touching the xhci->devs[slot_id] structure, so we
   3039 * don't need to take the xhci->lock for manipulating that.
   3040 */
   3041int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
   3042{
   3043	int i;
   3044	int ret = 0;
   3045	struct xhci_hcd *xhci;
   3046	struct xhci_virt_device	*virt_dev;
   3047	struct xhci_input_control_ctx *ctrl_ctx;
   3048	struct xhci_slot_ctx *slot_ctx;
   3049	struct xhci_command *command;
   3050
   3051	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
   3052	if (ret <= 0)
   3053		return ret;
   3054	xhci = hcd_to_xhci(hcd);
   3055	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
   3056		(xhci->xhc_state & XHCI_STATE_REMOVING))
   3057		return -ENODEV;
   3058
   3059	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
   3060	virt_dev = xhci->devs[udev->slot_id];
   3061
   3062	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
   3063	if (!command)
   3064		return -ENOMEM;
   3065
   3066	command->in_ctx = virt_dev->in_ctx;
   3067
   3068	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
   3069	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
   3070	if (!ctrl_ctx) {
   3071		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   3072				__func__);
   3073		ret = -ENOMEM;
   3074		goto command_cleanup;
   3075	}
   3076	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
   3077	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
   3078	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
   3079
   3080	/* Don't issue the command if there's no endpoints to update. */
   3081	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
   3082	    ctrl_ctx->drop_flags == 0) {
   3083		ret = 0;
   3084		goto command_cleanup;
   3085	}
   3086	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
   3087	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
   3088	for (i = 31; i >= 1; i--) {
   3089		__le32 le32 = cpu_to_le32(BIT(i));
   3090
   3091		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
   3092		    || (ctrl_ctx->add_flags & le32) || i == 1) {
   3093			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
   3094			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
   3095			break;
   3096		}
   3097	}
   3098
   3099	ret = xhci_configure_endpoint(xhci, udev, command,
   3100			false, false);
   3101	if (ret)
   3102		/* Caller should call reset_bandwidth() */
   3103		goto command_cleanup;
   3104
   3105	/* Free any rings that were dropped, but not changed. */
   3106	for (i = 1; i < 31; i++) {
   3107		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
   3108		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
   3109			xhci_free_endpoint_ring(xhci, virt_dev, i);
   3110			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
   3111		}
   3112	}
   3113	xhci_zero_in_ctx(xhci, virt_dev);
   3114	/*
   3115	 * Install any rings for completely new endpoints or changed endpoints,
   3116	 * and free any old rings from changed endpoints.
   3117	 */
   3118	for (i = 1; i < 31; i++) {
   3119		if (!virt_dev->eps[i].new_ring)
   3120			continue;
   3121		/* Only free the old ring if it exists.
   3122		 * It may not if this is the first add of an endpoint.
   3123		 */
   3124		if (virt_dev->eps[i].ring) {
   3125			xhci_free_endpoint_ring(xhci, virt_dev, i);
   3126		}
   3127		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
   3128		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
   3129		virt_dev->eps[i].new_ring = NULL;
   3130		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
   3131	}
   3132command_cleanup:
   3133	kfree(command->completion);
   3134	kfree(command);
   3135
   3136	return ret;
   3137}
   3138EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
   3139
   3140void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
   3141{
   3142	struct xhci_hcd *xhci;
   3143	struct xhci_virt_device	*virt_dev;
   3144	int i, ret;
   3145
   3146	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
   3147	if (ret <= 0)
   3148		return;
   3149	xhci = hcd_to_xhci(hcd);
   3150
   3151	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
   3152	virt_dev = xhci->devs[udev->slot_id];
   3153	/* Free any rings allocated for added endpoints */
   3154	for (i = 0; i < 31; i++) {
   3155		if (virt_dev->eps[i].new_ring) {
   3156			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
   3157			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
   3158			virt_dev->eps[i].new_ring = NULL;
   3159		}
   3160	}
   3161	xhci_zero_in_ctx(xhci, virt_dev);
   3162}
   3163EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
   3164
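       /*
        * Prepare the input control context for a configure endpoint command:
        * set the requested add and drop flags, copy the current slot context
        * from the output context, and always flag the slot context as added.
        */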
   3165static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
   3166		struct xhci_container_ctx *in_ctx,
   3167		struct xhci_container_ctx *out_ctx,
   3168		struct xhci_input_control_ctx *ctrl_ctx,
   3169		u32 add_flags, u32 drop_flags)
   3170{
   3171	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
   3172	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
   3173	xhci_slot_copy(xhci, in_ctx, out_ctx);
   3174	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
   3175}
   3176
   3177static void xhci_endpoint_disable(struct usb_hcd *hcd,
   3178				  struct usb_host_endpoint *host_ep)
   3179{
   3180	struct xhci_hcd		*xhci;
   3181	struct xhci_virt_device	*vdev;
   3182	struct xhci_virt_ep	*ep;
   3183	struct usb_device	*udev;
   3184	unsigned long		flags;
   3185	unsigned int		ep_index;
   3186
   3187	xhci = hcd_to_xhci(hcd);
   3188rescan:
   3189	spin_lock_irqsave(&xhci->lock, flags);
   3190
   3191	udev = (struct usb_device *)host_ep->hcpriv;
   3192	if (!udev || !udev->slot_id)
   3193		goto done;
   3194
   3195	vdev = xhci->devs[udev->slot_id];
   3196	if (!vdev)
   3197		goto done;
   3198
   3199	ep_index = xhci_get_endpoint_index(&host_ep->desc);
   3200	ep = &vdev->eps[ep_index];
   3201
   3202	/* wait for hub_tt_work to finish clearing hub TT */
   3203	if (ep->ep_state & EP_CLEARING_TT) {
   3204		spin_unlock_irqrestore(&xhci->lock, flags);
   3205		schedule_timeout_uninterruptible(1);
   3206		goto rescan;
   3207	}
   3208
   3209	if (ep->ep_state)
   3210		xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
   3211			 ep->ep_state);
   3212done:
   3213	host_ep->hcpriv = NULL;
   3214	spin_unlock_irqrestore(&xhci->lock, flags);
   3215}
   3216
   3217/*
   3218 * Called after usb core issues a clear halt control message.
   3219 * The host side of the halt should already be cleared by a reset endpoint
   3220 * command issued when the STALL event was received.
   3221 *
   3222 * The reset endpoint command may only be issued to endpoints in the halted
   3223 * state. For software that wishes to reset the data toggle or sequence number
   3224 * of an endpoint that isn't in the halted state this function will issue a
   3225 * configure endpoint command with the Drop and Add bits set for the target
   3226 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
   3227 */
   3228
   3229static void xhci_endpoint_reset(struct usb_hcd *hcd,
   3230		struct usb_host_endpoint *host_ep)
   3231{
   3232	struct xhci_hcd *xhci;
   3233	struct usb_device *udev;
   3234	struct xhci_virt_device *vdev;
   3235	struct xhci_virt_ep *ep;
   3236	struct xhci_input_control_ctx *ctrl_ctx;
   3237	struct xhci_command *stop_cmd, *cfg_cmd;
   3238	unsigned int ep_index;
   3239	unsigned long flags;
   3240	u32 ep_flag;
   3241	int err;
   3242
   3243	xhci = hcd_to_xhci(hcd);
   3244	if (!host_ep->hcpriv)
   3245		return;
   3246	udev = (struct usb_device *) host_ep->hcpriv;
   3247	vdev = xhci->devs[udev->slot_id];
   3248
   3249	/*
   3250	 * vdev may be lost due to xHC restore error and re-initialization
   3251	 * during S3/S4 resume. A new vdev will be allocated later by
   3252	 * xhci_discover_or_reset_device()
   3253	 */
   3254	if (!udev->slot_id || !vdev)
   3255		return;
   3256	ep_index = xhci_get_endpoint_index(&host_ep->desc);
   3257	ep = &vdev->eps[ep_index];
   3258
   3259	/* Bail out if toggle is already being cleared by an endpoint reset */
   3260	spin_lock_irqsave(&xhci->lock, flags);
   3261	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
   3262		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
   3263		spin_unlock_irqrestore(&xhci->lock, flags);
   3264		return;
   3265	}
   3266	spin_unlock_irqrestore(&xhci->lock, flags);
   3267	/* Only interrupt and bulk endpoints use data toggle; see USB2 spec 5.5.4 */
   3268	if (usb_endpoint_xfer_control(&host_ep->desc) ||
   3269	    usb_endpoint_xfer_isoc(&host_ep->desc))
   3270		return;
   3271
   3272	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
   3273
   3274	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
   3275		return;
   3276
   3277	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
   3278	if (!stop_cmd)
   3279		return;
   3280
   3281	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
   3282	if (!cfg_cmd)
   3283		goto cleanup;
   3284
   3285	spin_lock_irqsave(&xhci->lock, flags);
   3286
   3287	/* block queuing new trbs and ringing ep doorbell */
   3288	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
   3289
   3290	/*
   3291	 * Make sure endpoint ring is empty before resetting the toggle/seq.
   3292	 * The driver is required to synchronously cancel all transfer requests.
   3293	 * Stop the endpoint to force the xHC to update the output context.
   3294	 */
   3295
   3296	if (!list_empty(&ep->ring->td_list)) {
   3297		dev_err(&udev->dev, "EP not empty, refuse reset\n");
   3298		spin_unlock_irqrestore(&xhci->lock, flags);
   3299		xhci_free_command(xhci, cfg_cmd);
   3300		goto cleanup;
   3301	}
   3302
   3303	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
   3304					ep_index, 0);
   3305	if (err < 0) {
   3306		spin_unlock_irqrestore(&xhci->lock, flags);
   3307		xhci_free_command(xhci, cfg_cmd);
   3308		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
   3309				__func__, err);
   3310		goto cleanup;
   3311	}
   3312
   3313	xhci_ring_cmd_db(xhci);
   3314	spin_unlock_irqrestore(&xhci->lock, flags);
   3315
   3316	wait_for_completion(stop_cmd->completion);
   3317
   3318	spin_lock_irqsave(&xhci->lock, flags);
   3319
   3320	/* config ep command clears toggle if add and drop ep flags are set */
   3321	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
   3322	if (!ctrl_ctx) {
   3323		spin_unlock_irqrestore(&xhci->lock, flags);
   3324		xhci_free_command(xhci, cfg_cmd);
   3325		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   3326				__func__);
   3327		goto cleanup;
   3328	}
   3329
   3330	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
   3331					   ctrl_ctx, ep_flag, ep_flag);
   3332	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
   3333
   3334	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
   3335				      udev->slot_id, false);
   3336	if (err < 0) {
   3337		spin_unlock_irqrestore(&xhci->lock, flags);
   3338		xhci_free_command(xhci, cfg_cmd);
   3339		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
   3340				__func__, err);
   3341		goto cleanup;
   3342	}
   3343
   3344	xhci_ring_cmd_db(xhci);
   3345	spin_unlock_irqrestore(&xhci->lock, flags);
   3346
   3347	wait_for_completion(cfg_cmd->completion);
   3348
   3349	xhci_free_command(xhci, cfg_cmd);
   3350cleanup:
   3351	xhci_free_command(xhci, stop_cmd);
   3352	spin_lock_irqsave(&xhci->lock, flags);
   3353	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
   3354		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
   3355	spin_unlock_irqrestore(&xhci->lock, flags);
   3356}
   3357
   3358static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
   3359		struct usb_device *udev, struct usb_host_endpoint *ep,
   3360		unsigned int slot_id)
   3361{
   3362	int ret;
   3363	unsigned int ep_index;
   3364	unsigned int ep_state;
   3365
   3366	if (!ep)
   3367		return -EINVAL;
   3368	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
   3369	if (ret <= 0)
   3370		return ret ? ret : -EINVAL;
   3371	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
   3372		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
   3373				" descriptor for ep 0x%x does not support streams\n",
   3374				ep->desc.bEndpointAddress);
   3375		return -EINVAL;
   3376	}
   3377
   3378	ep_index = xhci_get_endpoint_index(&ep->desc);
   3379	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
   3380	if (ep_state & EP_HAS_STREAMS ||
   3381			ep_state & EP_GETTING_STREAMS) {
   3382		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
   3383				"already has streams set up.\n",
   3384				ep->desc.bEndpointAddress);
   3385		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
   3386				"dynamic stream context array reallocation.\n");
   3387		return -EINVAL;
   3388	}
   3389	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
   3390		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
   3391				"endpoint 0x%x; URBs are pending.\n",
   3392				ep->desc.bEndpointAddress);
   3393		return -EINVAL;
   3394	}
   3395	return 0;
   3396}
   3397
   3398static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
   3399		unsigned int *num_streams, unsigned int *num_stream_ctxs)
   3400{
   3401	unsigned int max_streams;
   3402
   3403	/* The stream context array size must be a power of two */
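       	/* e.g. a request for 5 stream IDs rounds up to an 8-entry array */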
   3404	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
   3405	/*
   3406	 * Find out how many primary stream array entries the host controller
   3407	 * supports.  Later we may use secondary stream arrays (similar to 2nd
   3408	 * level page entries), but that's an optional feature for xHCI host
   3409	 * controllers. xHCs must support at least 4 stream IDs.
   3410	 */
   3411	max_streams = HCC_MAX_PSA(xhci->hcc_params);
   3412	if (*num_stream_ctxs > max_streams) {
   3413		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
   3414				max_streams);
   3415		*num_stream_ctxs = max_streams;
   3416		*num_streams = max_streams;
   3417	}
   3418}
   3419
   3420/* Returns an error code if one of the endpoints already has streams.
   3421 * This does not change any data structures, it only checks and gathers
   3422 * information.
   3423 */
   3424static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
   3425		struct usb_device *udev,
   3426		struct usb_host_endpoint **eps, unsigned int num_eps,
   3427		unsigned int *num_streams, u32 *changed_ep_bitmask)
   3428{
   3429	unsigned int max_streams;
   3430	unsigned int endpoint_flag;
   3431	int i;
   3432	int ret;
   3433
   3434	for (i = 0; i < num_eps; i++) {
   3435		ret = xhci_check_streams_endpoint(xhci, udev,
   3436				eps[i], udev->slot_id);
   3437		if (ret < 0)
   3438			return ret;
   3439
   3440		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
   3441		if (max_streams < (*num_streams - 1)) {
   3442			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
   3443					eps[i]->desc.bEndpointAddress,
   3444					max_streams);
   3445			*num_streams = max_streams+1;
   3446		}
   3447
   3448		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
   3449		if (*changed_ep_bitmask & endpoint_flag)
   3450			return -EINVAL;
   3451		*changed_ep_bitmask |= endpoint_flag;
   3452	}
   3453	return 0;
   3454}
   3455
   3456static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
   3457		struct usb_device *udev,
   3458		struct usb_host_endpoint **eps, unsigned int num_eps)
   3459{
   3460	u32 changed_ep_bitmask = 0;
   3461	unsigned int slot_id;
   3462	unsigned int ep_index;
   3463	unsigned int ep_state;
   3464	int i;
   3465
   3466	slot_id = udev->slot_id;
   3467	if (!xhci->devs[slot_id])
   3468		return 0;
   3469
   3470	for (i = 0; i < num_eps; i++) {
   3471		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3472		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
   3473		/* Are streams already being freed for the endpoint? */
   3474		if (ep_state & EP_GETTING_NO_STREAMS) {
   3475			xhci_warn(xhci, "WARN Can't disable streams for "
   3476					"endpoint 0x%x, "
   3477					"streams are being disabled already\n",
   3478					eps[i]->desc.bEndpointAddress);
   3479			return 0;
   3480		}
   3481		/* Are there actually any streams to free? */
   3482		if (!(ep_state & EP_HAS_STREAMS) &&
   3483				!(ep_state & EP_GETTING_STREAMS)) {
   3484			xhci_warn(xhci, "WARN Can't disable streams for "
   3485					"endpoint 0x%x, "
   3486					"streams are already disabled!\n",
   3487					eps[i]->desc.bEndpointAddress);
   3488			xhci_warn(xhci, "WARN xhci_free_streams() called "
   3489					"with non-streams endpoint\n");
   3490			return 0;
   3491		}
   3492		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
   3493	}
   3494	return changed_ep_bitmask;
   3495}
   3496
   3497/*
   3498 * The USB device drivers use this function (through the HCD interface in USB
   3499 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
   3500 * coordinate mass storage command queueing across multiple endpoints (basically
   3501 * a stream ID == a task ID).
   3502 *
   3503 * Setting up streams involves allocating the same size stream context array
   3504 * for each endpoint and issuing a configure endpoint command for all endpoints.
   3505 *
   3506 * Don't allow the call to succeed if one endpoint only supports one stream
   3507 * (which means it doesn't support streams at all).
   3508 *
   3509 * Drivers may get less stream IDs than they asked for, if the host controller
   3510 * hardware or endpoints claim they can't support the number of requested
   3511 * stream IDs.
   3512 */
   3513static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
   3514		struct usb_host_endpoint **eps, unsigned int num_eps,
   3515		unsigned int num_streams, gfp_t mem_flags)
   3516{
   3517	int i, ret;
   3518	struct xhci_hcd *xhci;
   3519	struct xhci_virt_device *vdev;
   3520	struct xhci_command *config_cmd;
   3521	struct xhci_input_control_ctx *ctrl_ctx;
   3522	unsigned int ep_index;
   3523	unsigned int num_stream_ctxs;
   3524	unsigned int max_packet;
   3525	unsigned long flags;
   3526	u32 changed_ep_bitmask = 0;
   3527
   3528	if (!eps)
   3529		return -EINVAL;
   3530
   3531	/* Add one to the number of streams requested to account for
   3532	 * stream 0 that is reserved for xHCI usage.
   3533	 */
   3534	num_streams += 1;
   3535	xhci = hcd_to_xhci(hcd);
   3536	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
   3537			num_streams);
   3538
   3539	/* MaxPSASize value 0 (2 streams) means streams are not supported */
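       	/* HCC_MAX_PSA() is assumed to decode MaxPSASize into the array entry
       	 * count (2^(MaxPSASize + 1)); fewer than 4 entries is treated as no
       	 * stream support.
       	 */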
   3540	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
   3541			HCC_MAX_PSA(xhci->hcc_params) < 4) {
   3542		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
   3543		return -ENOSYS;
   3544	}
   3545
   3546	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
   3547	if (!config_cmd)
   3548		return -ENOMEM;
   3549
   3550	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
   3551	if (!ctrl_ctx) {
   3552		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   3553				__func__);
   3554		xhci_free_command(xhci, config_cmd);
   3555		return -ENOMEM;
   3556	}
   3557
   3558	/* Check to make sure all endpoints are not already configured for
   3559	 * streams.  While we're at it, find the maximum number of streams that
   3560	 * all the endpoints will support and check for duplicate endpoints.
   3561	 */
   3562	spin_lock_irqsave(&xhci->lock, flags);
   3563	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
   3564			num_eps, &num_streams, &changed_ep_bitmask);
   3565	if (ret < 0) {
   3566		xhci_free_command(xhci, config_cmd);
   3567		spin_unlock_irqrestore(&xhci->lock, flags);
   3568		return ret;
   3569	}
   3570	if (num_streams <= 1) {
   3571		xhci_warn(xhci, "WARN: endpoints can't handle "
   3572				"more than one stream.\n");
   3573		xhci_free_command(xhci, config_cmd);
   3574		spin_unlock_irqrestore(&xhci->lock, flags);
   3575		return -EINVAL;
   3576	}
   3577	vdev = xhci->devs[udev->slot_id];
   3578	/* Mark each endpoint as being in transition, so
   3579	 * xhci_urb_enqueue() will reject all URBs.
   3580	 */
   3581	for (i = 0; i < num_eps; i++) {
   3582		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3583		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
   3584	}
   3585	spin_unlock_irqrestore(&xhci->lock, flags);
   3586
   3587	/* Setup internal data structures and allocate HW data structures for
   3588	 * streams (but don't install the HW structures in the input context
   3589	 * until we're sure all memory allocation succeeded).
   3590	 */
   3591	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
   3592	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
   3593			num_stream_ctxs, num_streams);
   3594
   3595	for (i = 0; i < num_eps; i++) {
   3596		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3597		max_packet = usb_endpoint_maxp(&eps[i]->desc);
   3598		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
   3599				num_stream_ctxs,
   3600				num_streams,
   3601				max_packet, mem_flags);
   3602		if (!vdev->eps[ep_index].stream_info)
   3603			goto cleanup;
   3604		/* Set maxPstreams in endpoint context and update deq ptr to
   3605		 * point to stream context array. FIXME
   3606		 */
   3607	}
   3608
   3609	/* Set up the input context for a configure endpoint command. */
   3610	for (i = 0; i < num_eps; i++) {
   3611		struct xhci_ep_ctx *ep_ctx;
   3612
   3613		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3614		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
   3615
   3616		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
   3617				vdev->out_ctx, ep_index);
   3618		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
   3619				vdev->eps[ep_index].stream_info);
   3620	}
   3621	/* Tell the HW to drop its old copy of the endpoint context info
   3622	 * and add the updated copy from the input context.
   3623	 */
   3624	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
   3625			vdev->out_ctx, ctrl_ctx,
   3626			changed_ep_bitmask, changed_ep_bitmask);
   3627
   3628	/* Issue and wait for the configure endpoint command */
   3629	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
   3630			false, false);
   3631
   3632	/* xHC rejected the configure endpoint command for some reason, so we
   3633	 * leave the old ring intact and free our internal streams data
   3634	 * structure.
   3635	 */
   3636	if (ret < 0)
   3637		goto cleanup;
   3638
   3639	spin_lock_irqsave(&xhci->lock, flags);
   3640	for (i = 0; i < num_eps; i++) {
   3641		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3642		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
   3643		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
   3644			 udev->slot_id, ep_index);
   3645		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
   3646	}
   3647	xhci_free_command(xhci, config_cmd);
   3648	spin_unlock_irqrestore(&xhci->lock, flags);
   3649
   3650	for (i = 0; i < num_eps; i++) {
   3651		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3652		xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
   3653	}
   3654	/* Subtract 1 for stream 0, which drivers can't use */
   3655	return num_streams - 1;
   3656
   3657cleanup:
   3658	/* If it didn't work, free the streams! */
   3659	for (i = 0; i < num_eps; i++) {
   3660		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3661		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
   3662		vdev->eps[ep_index].stream_info = NULL;
   3663		/* FIXME Unset maxPstreams in endpoint context and
   3664		 * update deq ptr to point to the normal endpoint ring.
   3665		 */
   3666		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
   3667		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
   3668		xhci_endpoint_zero(xhci, vdev, eps[i]);
   3669	}
   3670	xhci_free_command(xhci, config_cmd);
   3671	return -ENOMEM;
   3672}
   3673
   3674/* Transition the endpoint from using streams to being a "normal" endpoint
   3675 * without streams.
   3676 *
   3677 * Modify the endpoint context state, submit a configure endpoint command,
   3678 * and free all endpoint rings for streams if that completes successfully.
   3679 */
   3680static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
   3681		struct usb_host_endpoint **eps, unsigned int num_eps,
   3682		gfp_t mem_flags)
   3683{
   3684	int i, ret;
   3685	struct xhci_hcd *xhci;
   3686	struct xhci_virt_device *vdev;
   3687	struct xhci_command *command;
   3688	struct xhci_input_control_ctx *ctrl_ctx;
   3689	unsigned int ep_index;
   3690	unsigned long flags;
   3691	u32 changed_ep_bitmask;
   3692
   3693	xhci = hcd_to_xhci(hcd);
   3694	vdev = xhci->devs[udev->slot_id];
   3695
   3696	/* Set up a configure endpoint command to remove the streams rings */
   3697	spin_lock_irqsave(&xhci->lock, flags);
   3698	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
   3699			udev, eps, num_eps);
   3700	if (changed_ep_bitmask == 0) {
   3701		spin_unlock_irqrestore(&xhci->lock, flags);
   3702		return -EINVAL;
   3703	}
   3704
   3705	/* Use the xhci_command structure from the first endpoint.  We may have
   3706	 * allocated too many, but the driver may call xhci_free_streams() for
   3707	 * each endpoint it grouped into one call to xhci_alloc_streams().
   3708	 */
   3709	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
   3710	command = vdev->eps[ep_index].stream_info->free_streams_command;
   3711	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
   3712	if (!ctrl_ctx) {
   3713		spin_unlock_irqrestore(&xhci->lock, flags);
   3714		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   3715				__func__);
   3716		return -EINVAL;
   3717	}
   3718
   3719	for (i = 0; i < num_eps; i++) {
   3720		struct xhci_ep_ctx *ep_ctx;
   3721
   3722		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3723		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
   3724		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
   3725			EP_GETTING_NO_STREAMS;
   3726
   3727		xhci_endpoint_copy(xhci, command->in_ctx,
   3728				vdev->out_ctx, ep_index);
   3729		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
   3730				&vdev->eps[ep_index]);
   3731	}
   3732	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
   3733			vdev->out_ctx, ctrl_ctx,
   3734			changed_ep_bitmask, changed_ep_bitmask);
   3735	spin_unlock_irqrestore(&xhci->lock, flags);
   3736
   3737	/* Issue and wait for the configure endpoint command,
   3738	 * which must succeed.
   3739	 */
   3740	ret = xhci_configure_endpoint(xhci, udev, command,
   3741			false, true);
   3742
   3743	/* xHC rejected the configure endpoint command for some reason, so we
   3744	 * leave the streams rings intact.
   3745	 */
   3746	if (ret < 0)
   3747		return ret;
   3748
   3749	spin_lock_irqsave(&xhci->lock, flags);
   3750	for (i = 0; i < num_eps; i++) {
   3751		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
   3752		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
   3753		vdev->eps[ep_index].stream_info = NULL;
   3754		/* FIXME Unset maxPstreams in endpoint context and
   3755		 * update deq ptr to point to the normal endpoint ring.
   3756		 */
   3757		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
   3758		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
   3759	}
   3760	spin_unlock_irqrestore(&xhci->lock, flags);
   3761
   3762	return 0;
   3763}
   3764
   3765/*
   3766 * Deletes endpoint resources for endpoints that were active before a Reset
   3767 * Device command, or a Disable Slot command.  The Reset Device command leaves
   3768 * the control endpoint intact, whereas the Disable Slot command deletes it.
   3769 *
   3770 * Must be called with xhci->lock held.
   3771 */
   3772void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
   3773	struct xhci_virt_device *virt_dev, bool drop_control_ep)
   3774{
   3775	int i;
   3776	unsigned int num_dropped_eps = 0;
   3777	unsigned int drop_flags = 0;
   3778
   3779	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
   3780		if (virt_dev->eps[i].ring) {
   3781			drop_flags |= 1 << i;
   3782			num_dropped_eps++;
   3783		}
   3784	}
   3785	xhci->num_active_eps -= num_dropped_eps;
   3786	if (num_dropped_eps)
   3787		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   3788				"Dropped %u ep ctxs, flags = 0x%x, "
   3789				"%u now active.",
   3790				num_dropped_eps, drop_flags,
   3791				xhci->num_active_eps);
   3792}
   3793
   3794/*
   3795 * This submits a Reset Device Command, which will set the device state to 0,
   3796 * set the device address to 0, and disable all the endpoints except the default
   3797 * control endpoint.  The USB core should come back and call
   3798 * xhci_address_device(), and then re-set up the configuration.  If this is
   3799 * called because of a usb_reset_and_verify_device(), then the old alternate
   3800 * settings will be re-installed through the normal bandwidth allocation
   3801 * functions.
   3802 *
   3803 * Wait for the Reset Device command to finish.  Remove all structures
   3804 * associated with the endpoints that were disabled.  Clear the input device
   3805 * structure? Reset the control endpoint 0 max packet size?
   3806 *
   3807 * If the virt_dev to be reset does not exist or does not match the udev,
   3808 * it means the device is lost, possibly due to the xHC restore error and
   3809 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
   3810 * re-allocate the device.
   3811 */
   3812static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
   3813		struct usb_device *udev)
   3814{
   3815	int ret, i;
   3816	unsigned long flags;
   3817	struct xhci_hcd *xhci;
   3818	unsigned int slot_id;
   3819	struct xhci_virt_device *virt_dev;
   3820	struct xhci_command *reset_device_cmd;
   3821	struct xhci_slot_ctx *slot_ctx;
   3822	int old_active_eps = 0;
   3823
   3824	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
   3825	if (ret <= 0)
   3826		return ret;
   3827	xhci = hcd_to_xhci(hcd);
   3828	slot_id = udev->slot_id;
   3829	virt_dev = xhci->devs[slot_id];
   3830	if (!virt_dev) {
   3831		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
   3832				"not exist. Re-allocate the device\n", slot_id);
   3833		ret = xhci_alloc_dev(hcd, udev);
   3834		if (ret == 1)
   3835			return 0;
   3836		else
   3837			return -EINVAL;
   3838	}
   3839
   3840	if (virt_dev->tt_info)
   3841		old_active_eps = virt_dev->tt_info->active_eps;
   3842
   3843	if (virt_dev->udev != udev) {
   3844		/* If the virt_dev and the udev do not match, this virt_dev
   3845		 * may belong to another udev.
   3846		 * Re-allocate the device.
   3847		 */
   3848		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
   3849				"not match the udev. Re-allocate the device\n",
   3850				slot_id);
   3851		ret = xhci_alloc_dev(hcd, udev);
   3852		if (ret == 1)
   3853			return 0;
   3854		else
   3855			return -EINVAL;
   3856	}
   3857
   3858	/* If the device is not set up, there is no point in resetting it */
   3859	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
   3860	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
   3861						SLOT_STATE_DISABLED)
   3862		return 0;
   3863
   3864	trace_xhci_discover_or_reset_device(slot_ctx);
   3865
   3866	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
   3867	/* Allocate the command structure that holds the struct completion.
   3868	 * Assume we're in process context, since the normal device reset
   3869	 * process has to wait for the device anyway.  Storage devices are
   3870	 * reset as part of error handling, so use GFP_NOIO instead of
   3871	 * GFP_KERNEL.
   3872	 */
   3873	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
   3874	if (!reset_device_cmd) {
   3875		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
   3876		return -ENOMEM;
   3877	}
   3878
   3879	/* Attempt to submit the Reset Device command to the command ring */
   3880	spin_lock_irqsave(&xhci->lock, flags);
   3881
   3882	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
   3883	if (ret) {
   3884		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
   3885		spin_unlock_irqrestore(&xhci->lock, flags);
   3886		goto command_cleanup;
   3887	}
   3888	xhci_ring_cmd_db(xhci);
   3889	spin_unlock_irqrestore(&xhci->lock, flags);
   3890
   3891	/* Wait for the Reset Device command to finish */
   3892	wait_for_completion(reset_device_cmd->completion);
   3893
   3894	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
   3895	 * unless we tried to reset a slot ID that wasn't enabled,
   3896	 * or the device wasn't in the addressed or configured state.
   3897	 */
   3898	ret = reset_device_cmd->status;
   3899	switch (ret) {
   3900	case COMP_COMMAND_ABORTED:
   3901	case COMP_COMMAND_RING_STOPPED:
   3902		xhci_warn(xhci, "Timeout waiting for reset device command\n");
   3903		ret = -ETIME;
   3904		goto command_cleanup;
   3905	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
   3906	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
   3907		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
   3908				slot_id,
   3909				xhci_get_slot_state(xhci, virt_dev->out_ctx));
   3910		xhci_dbg(xhci, "Not freeing device rings.\n");
   3911		/* Don't treat this as an error.  May change my mind later. */
   3912		ret = 0;
   3913		goto command_cleanup;
   3914	case COMP_SUCCESS:
   3915		xhci_dbg(xhci, "Successful reset device command.\n");
   3916		break;
   3917	default:
   3918		if (xhci_is_vendor_info_code(xhci, ret))
   3919			break;
   3920		xhci_warn(xhci, "Unknown completion code %u for "
   3921				"reset device command.\n", ret);
   3922		ret = -EINVAL;
   3923		goto command_cleanup;
   3924	}
   3925
   3926	/* Free up host controller endpoint resources */
   3927	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
   3928		spin_lock_irqsave(&xhci->lock, flags);
   3929		/* Don't delete the default control endpoint resources */
   3930		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
   3931		spin_unlock_irqrestore(&xhci->lock, flags);
   3932	}
   3933
   3934	/* Everything but endpoint 0 is disabled, so free the rings. */
   3935	for (i = 1; i < 31; i++) {
   3936		struct xhci_virt_ep *ep = &virt_dev->eps[i];
   3937
   3938		if (ep->ep_state & EP_HAS_STREAMS) {
   3939			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
   3940					xhci_get_endpoint_address(i));
   3941			xhci_free_stream_info(xhci, ep->stream_info);
   3942			ep->stream_info = NULL;
   3943			ep->ep_state &= ~EP_HAS_STREAMS;
   3944		}
   3945
   3946		if (ep->ring) {
   3947			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
   3948			xhci_free_endpoint_ring(xhci, virt_dev, i);
   3949		}
   3950		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
   3951			xhci_drop_ep_from_interval_table(xhci,
   3952					&virt_dev->eps[i].bw_info,
   3953					virt_dev->bw_table,
   3954					udev,
   3955					&virt_dev->eps[i],
   3956					virt_dev->tt_info);
   3957		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
   3958	}
   3959	/* If necessary, update the number of active TTs on this root port */
   3960	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
   3961	virt_dev->flags = 0;
   3962	ret = 0;
   3963
   3964command_cleanup:
   3965	xhci_free_command(xhci, reset_device_cmd);
   3966	return ret;
   3967}
   3968
   3969/*
   3970 * At this point, the struct usb_device is about to go away, the device has
   3971 * disconnected, and all traffic has been stopped and the endpoints have been
   3972 * disabled.  Free any HC data structures associated with that device.
   3973 */
   3974static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
   3975{
   3976	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   3977	struct xhci_virt_device *virt_dev;
   3978	struct xhci_slot_ctx *slot_ctx;
   3979	int i, ret;
   3980
   3981	/*
   3982	 * We called pm_runtime_get_noresume when the device was attached.
   3983	 * Decrement the counter here to allow the controller to runtime suspend
   3984	 * if no devices remain.
   3985	 */
   3986	if (xhci->quirks & XHCI_RESET_ON_RESUME)
   3987		pm_runtime_put_noidle(hcd->self.controller);
   3988
   3989	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
   3990	/* If the host is halted due to driver unload, we still need to free the
   3991	 * device.
   3992	 */
   3993	if (ret <= 0 && ret != -ENODEV)
   3994		return;
   3995
   3996	virt_dev = xhci->devs[udev->slot_id];
   3997	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
   3998	trace_xhci_free_dev(slot_ctx);
   3999
   4000	/* Stop any wayward timer functions (which may grab the lock) */
   4001	for (i = 0; i < 31; i++)
   4002		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
   4003	virt_dev->udev = NULL;
   4004	xhci_disable_slot(xhci, udev->slot_id);
   4005	xhci_free_virt_device(xhci, udev->slot_id);
   4006}
   4007
   4008int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
   4009{
   4010	struct xhci_command *command;
   4011	unsigned long flags;
   4012	u32 state;
   4013	int ret;
   4014
   4015	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
   4016	if (!command)
   4017		return -ENOMEM;
   4018
   4019	xhci_debugfs_remove_slot(xhci, slot_id);
   4020
   4021	spin_lock_irqsave(&xhci->lock, flags);
   4022	/* Don't disable the slot if the host controller is dead. */
   4023	state = readl(&xhci->op_regs->status);
   4024	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
   4025			(xhci->xhc_state & XHCI_STATE_HALTED)) {
   4026		spin_unlock_irqrestore(&xhci->lock, flags);
   4027		kfree(command);
   4028		return -ENODEV;
   4029	}
   4030
   4031	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
   4032				slot_id);
   4033	if (ret) {
   4034		spin_unlock_irqrestore(&xhci->lock, flags);
   4035		kfree(command);
   4036		return ret;
   4037	}
   4038	xhci_ring_cmd_db(xhci);
   4039	spin_unlock_irqrestore(&xhci->lock, flags);
   4040
   4041	wait_for_completion(command->completion);
   4042
   4043	if (command->status != COMP_SUCCESS)
   4044		xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
   4045			  slot_id, command->status);
   4046
   4047	xhci_free_command(xhci, command);
   4048
   4049	return 0;
   4050}
   4051
   4052/*
   4053 * Checks if we have enough host controller resources for the default control
   4054 * endpoint.
   4055 *
   4056 * Must be called with xhci->lock held.
   4057 */
   4058static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
   4059{
   4060	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
   4061		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   4062				"Not enough ep ctxs: "
   4063				"%u active, need to add 1, limit is %u.",
   4064				xhci->num_active_eps, xhci->limit_active_eps);
   4065		return -ENOMEM;
   4066	}
   4067	xhci->num_active_eps += 1;
   4068	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
   4069			"Adding 1 ep ctx, %u now active.",
   4070			xhci->num_active_eps);
   4071	return 0;
   4072}
   4073
   4074
   4075/*
   4076 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
   4077 * timed out, or allocating memory failed.  Returns 1 on success.
   4078 */
   4079int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
   4080{
   4081	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   4082	struct xhci_virt_device *vdev;
   4083	struct xhci_slot_ctx *slot_ctx;
   4084	unsigned long flags;
   4085	int ret, slot_id;
   4086	struct xhci_command *command;
   4087
   4088	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
   4089	if (!command)
   4090		return 0;
   4091
   4092	spin_lock_irqsave(&xhci->lock, flags);
   4093	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
   4094	if (ret) {
   4095		spin_unlock_irqrestore(&xhci->lock, flags);
   4096		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
   4097		xhci_free_command(xhci, command);
   4098		return 0;
   4099	}
   4100	xhci_ring_cmd_db(xhci);
   4101	spin_unlock_irqrestore(&xhci->lock, flags);
   4102
   4103	wait_for_completion(command->completion);
   4104	slot_id = command->slot_id;
   4105
   4106	if (!slot_id || command->status != COMP_SUCCESS) {
   4107		xhci_err(xhci, "Error while assigning device slot ID\n");
   4108		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
   4109				HCS_MAX_SLOTS(
   4110					readl(&xhci->cap_regs->hcs_params1)));
   4111		xhci_free_command(xhci, command);
   4112		return 0;
   4113	}
   4114
   4115	xhci_free_command(xhci, command);
   4116
   4117	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
   4118		spin_lock_irqsave(&xhci->lock, flags);
   4119		ret = xhci_reserve_host_control_ep_resources(xhci);
   4120		if (ret) {
   4121			spin_unlock_irqrestore(&xhci->lock, flags);
   4122			xhci_warn(xhci, "Not enough host resources, "
   4123					"active endpoint contexts = %u\n",
   4124					xhci->num_active_eps);
   4125			goto disable_slot;
   4126		}
   4127		spin_unlock_irqrestore(&xhci->lock, flags);
   4128	}
   4129	/* Use GFP_NOIO, since this function can be called from
   4130	 * xhci_discover_or_reset_device(), which may be called as part of
   4131	 * mass storage driver error handling.
   4132	 */
   4133	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
   4134		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
   4135		goto disable_slot;
   4136	}
   4137	vdev = xhci->devs[slot_id];
   4138	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
   4139	trace_xhci_alloc_dev(slot_ctx);
   4140
   4141	udev->slot_id = slot_id;
   4142
   4143	xhci_debugfs_create_slot(xhci, slot_id);
   4144
   4145	/*
   4146	 * If resetting upon resume, we can't put the controller into runtime
   4147	 * suspend if there is a device attached.
   4148	 */
   4149	if (xhci->quirks & XHCI_RESET_ON_RESUME)
   4150		pm_runtime_get_noresume(hcd->self.controller);
   4151
   4152	/* Is this a LS or FS device under a HS hub? */
   4153	/* Hub or peripheral? */
   4154	return 1;
   4155
   4156disable_slot:
   4157	xhci_disable_slot(xhci, udev->slot_id);
   4158	xhci_free_virt_device(xhci, udev->slot_id);
   4159
   4160	return 0;
   4161}
   4162
   4163/*
   4164 * Issue an Address Device command and optionally send a corresponding
   4165 * SetAddress request to the device.
   4166 */
   4167static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
   4168			     enum xhci_setup_dev setup)
   4169{
   4170	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
   4171	unsigned long flags;
   4172	struct xhci_virt_device *virt_dev;
   4173	int ret = 0;
   4174	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   4175	struct xhci_slot_ctx *slot_ctx;
   4176	struct xhci_input_control_ctx *ctrl_ctx;
   4177	u64 temp_64;
   4178	struct xhci_command *command = NULL;
   4179
   4180	mutex_lock(&xhci->mutex);
   4181
   4182	if (xhci->xhc_state) {	/* dying, removing or halted */
   4183		ret = -ESHUTDOWN;
   4184		goto out;
   4185	}
   4186
   4187	if (!udev->slot_id) {
   4188		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4189				"Bad Slot ID %d", udev->slot_id);
   4190		ret = -EINVAL;
   4191		goto out;
   4192	}
   4193
   4194	virt_dev = xhci->devs[udev->slot_id];
   4195
   4196	if (WARN_ON(!virt_dev)) {
   4197		/*
   4198		 * In plug/unplug torture test with an NEC controller,
   4199		 * In a plug/unplug torture test with an NEC controller,
   4200		 * a NULL pointer dereference was observed once because virt_dev was NULL.
   4201		 * Print useful debug info rather than crash if it is observed again!
   4202		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
   4203			udev->slot_id);
   4204		ret = -EINVAL;
   4205		goto out;
   4206	}
   4207	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
   4208	trace_xhci_setup_device_slot(slot_ctx);
   4209
   4210	if (setup == SETUP_CONTEXT_ONLY) {
   4211		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
   4212		    SLOT_STATE_DEFAULT) {
   4213			xhci_dbg(xhci, "Slot already in default state\n");
   4214			goto out;
   4215		}
   4216	}
   4217
   4218	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
   4219	if (!command) {
   4220		ret = -ENOMEM;
   4221		goto out;
   4222	}
   4223
   4224	command->in_ctx = virt_dev->in_ctx;
   4225
   4226	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
   4227	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
   4228	if (!ctrl_ctx) {
   4229		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   4230				__func__);
   4231		ret = -EINVAL;
   4232		goto out;
   4233	}
   4234	/*
   4235	 * If this is the first Set Address since device plug-in or
   4236	 * virt_device reallocation after a resume with an xHCI power loss,
   4237	 * then set up the slot context.
   4238	 */
   4239	if (!slot_ctx->dev_info)
   4240		xhci_setup_addressable_virt_dev(xhci, udev);
   4241	/* Otherwise, update the control endpoint ring enqueue pointer. */
   4242	else
   4243		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
   4244	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
   4245	ctrl_ctx->drop_flags = 0;
   4246
   4247	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
   4248				le32_to_cpu(slot_ctx->dev_info) >> 27);
   4249
   4250	trace_xhci_address_ctrl_ctx(ctrl_ctx);
   4251	spin_lock_irqsave(&xhci->lock, flags);
   4252	trace_xhci_setup_device(virt_dev);
   4253	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
   4254					udev->slot_id, setup);
   4255	if (ret) {
   4256		spin_unlock_irqrestore(&xhci->lock, flags);
   4257		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4258				"FIXME: allocate a command ring segment");
   4259		goto out;
   4260	}
   4261	xhci_ring_cmd_db(xhci);
   4262	spin_unlock_irqrestore(&xhci->lock, flags);
   4263
   4264	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
   4265	wait_for_completion(command->completion);
   4266
   4267	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
   4268	 * the SetAddress() "recovery interval" required by USB and aborting the
   4269	 * command on a timeout.
   4270	 */
   4271	switch (command->status) {
   4272	case COMP_COMMAND_ABORTED:
   4273	case COMP_COMMAND_RING_STOPPED:
   4274		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
   4275		ret = -ETIME;
   4276		break;
   4277	case COMP_CONTEXT_STATE_ERROR:
   4278	case COMP_SLOT_NOT_ENABLED_ERROR:
   4279		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
   4280			 act, udev->slot_id);
   4281		ret = -EINVAL;
   4282		break;
   4283	case COMP_USB_TRANSACTION_ERROR:
   4284		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
   4285
   4286		mutex_unlock(&xhci->mutex);
   4287		ret = xhci_disable_slot(xhci, udev->slot_id);
   4288		xhci_free_virt_device(xhci, udev->slot_id);
   4289		if (!ret)
   4290			xhci_alloc_dev(hcd, udev);
   4291		kfree(command->completion);
   4292		kfree(command);
   4293		return -EPROTO;
   4294	case COMP_INCOMPATIBLE_DEVICE_ERROR:
   4295		dev_warn(&udev->dev,
   4296			 "ERROR: Incompatible device for setup %s command\n", act);
   4297		ret = -ENODEV;
   4298		break;
   4299	case COMP_SUCCESS:
   4300		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4301			       "Successful setup %s command", act);
   4302		break;
   4303	default:
   4304		xhci_err(xhci,
   4305			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
   4306			 act, command->status);
   4307		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
   4308		ret = -EINVAL;
   4309		break;
   4310	}
   4311	if (ret)
   4312		goto out;
   4313	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
   4314	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4315			"Op regs DCBAA ptr = %#016llx", temp_64);
   4316	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4317		"Slot ID %d dcbaa entry @%p = %#016llx",
   4318		udev->slot_id,
   4319		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
   4320		(unsigned long long)
   4321		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
   4322	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4323			"Output Context DMA address = %#08llx",
   4324			(unsigned long long)virt_dev->out_ctx->dma);
   4325	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
   4326				le32_to_cpu(slot_ctx->dev_info) >> 27);
   4327	/*
   4328	 * USB core uses address 1 for the roothubs, so we add one to the
   4329	 * address given back to us by the HC.
   4330	 */
   4331	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
   4332				le32_to_cpu(slot_ctx->dev_info) >> 27);
   4333	/* Zero the input context control for later use */
   4334	ctrl_ctx->add_flags = 0;
   4335	ctrl_ctx->drop_flags = 0;
   4336	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
   4337	udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
   4338
   4339	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
   4340		       "Internal device address = %d",
   4341		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
   4342out:
   4343	mutex_unlock(&xhci->mutex);
   4344	if (command) {
   4345		kfree(command->completion);
   4346		kfree(command);
   4347	}
   4348	return ret;
   4349}
   4350
   4351static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
   4352{
   4353	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
   4354}
   4355
   4356static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
   4357{
   4358	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
   4359}
   4360
   4361/*
   4362 * Translate the port index into the real index in the HW port status
   4363 * registers. Calculate the offset between the port's PORTSC register
   4364 * and the port status base, then divide by the number of per-port
   4365 * registers to get the real index. The raw port number is 1-based.
   4366 */
   4367int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
   4368{
   4369	struct xhci_hub *rhub;
   4370
   4371	rhub = xhci_get_rhub(hcd);
   4372	return rhub->ports[port1 - 1]->hw_portnum + 1;
   4373}
   4374
   4375/*
   4376 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
   4377 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
   4378 */
   4379static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
   4380			struct usb_device *udev, u16 max_exit_latency)
   4381{
   4382	struct xhci_virt_device *virt_dev;
   4383	struct xhci_command *command;
   4384	struct xhci_input_control_ctx *ctrl_ctx;
   4385	struct xhci_slot_ctx *slot_ctx;
   4386	unsigned long flags;
   4387	int ret;
   4388
   4389	command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
   4390	if (!command)
   4391		return -ENOMEM;
   4392
   4393	spin_lock_irqsave(&xhci->lock, flags);
   4394
   4395	virt_dev = xhci->devs[udev->slot_id];
   4396
   4397	/*
   4398	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
   4399	 * xHC was re-initialized. Exit latency will be set later after
   4400	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
   4401	 */
   4402
   4403	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
   4404		spin_unlock_irqrestore(&xhci->lock, flags);
   4405		return 0;
   4406	}
   4407
   4408	/* Attempt to issue an Evaluate Context command to change the MEL. */
   4409	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
   4410	if (!ctrl_ctx) {
   4411		spin_unlock_irqrestore(&xhci->lock, flags);
   4412		xhci_free_command(xhci, command);
   4413		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   4414				__func__);
   4415		return -ENOMEM;
   4416	}
   4417
   4418	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
   4419	spin_unlock_irqrestore(&xhci->lock, flags);
   4420
   4421	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
   4422	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
   4423	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
   4424	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
   4425	slot_ctx->dev_state = 0;
   4426
   4427	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
   4428			"Set up evaluate context for LPM MEL change.");
   4429
   4430	/* Issue and wait for the evaluate context command. */
   4431	ret = xhci_configure_endpoint(xhci, udev, command,
   4432			true, true);
   4433
   4434	if (!ret) {
   4435		spin_lock_irqsave(&xhci->lock, flags);
   4436		virt_dev->current_mel = max_exit_latency;
   4437		spin_unlock_irqrestore(&xhci->lock, flags);
   4438	}
   4439
   4440	xhci_free_command(xhci, command);
   4441
   4442	return ret;
   4443}
   4444
   4445#ifdef CONFIG_PM
   4446
   4447/* BESL to HIRD Encoding array for USB2 LPM */
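       /* Index is the BESL value; each entry is the matching latency in
        * microseconds (125 us up to 10 ms).
        */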
   4448static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
   4449	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
   4450
   4451/* Calculate HIRD/BESL for USB2 PORTPMSC */
   4452static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
   4453					struct usb_device *udev)
   4454{
   4455	int u2del, besl, besl_host;
   4456	int besl_device = 0;
   4457	u32 field;
   4458
   4459	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
   4460	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
   4461
   4462	if (field & USB_BESL_SUPPORT) {
   4463		for (besl_host = 0; besl_host < 16; besl_host++) {
   4464			if (xhci_besl_encoding[besl_host] >= u2del)
   4465				break;
   4466		}
   4467		/* Use baseline BESL value as default */
   4468		if (field & USB_BESL_BASELINE_VALID)
   4469			besl_device = USB_GET_BESL_BASELINE(field);
   4470		else if (field & USB_BESL_DEEP_VALID)
   4471			besl_device = USB_GET_BESL_DEEP(field);
   4472	} else {
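       		/* No BESL support: pick the smallest plain HIRD value whose
       		 * duration covers the host U2 exit latency, assuming the HIRD
       		 * encoding of 50 us for value 0 plus 75 us per step above it.
       		 */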
   4473		if (u2del <= 50)
   4474			besl_host = 0;
   4475		else
   4476			besl_host = (u2del - 51) / 75 + 1;
   4477	}
   4478
   4479	besl = besl_host + besl_device;
   4480	if (besl > 15)
   4481		besl = 15;
   4482
   4483	return besl;
   4484}
   4485
   4486/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
   4487static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
   4488{
   4489	u32 field;
   4490	int l1;
   4491	int besld = 0;
   4492	int hirdm = 0;
   4493
   4494	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
   4495
   4496	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
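       	/* e.g. a 512 us L1 timeout yields an L1 field value of 2 */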
   4497	l1 = udev->l1_params.timeout / 256;
   4498
   4499	/* device has preferred BESLD */
   4500	if (field & USB_BESL_DEEP_VALID) {
   4501		besld = USB_GET_BESL_DEEP(field);
   4502		hirdm = 1;
   4503	}
   4504
   4505	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
   4506}
   4507
   4508static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
   4509			struct usb_device *udev, int enable)
   4510{
   4511	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
   4512	struct xhci_port **ports;
   4513	__le32 __iomem	*pm_addr, *hlpm_addr;
   4514	u32		pm_val, hlpm_val, field;
   4515	unsigned int	port_num;
   4516	unsigned long	flags;
   4517	int		hird, exit_latency;
   4518	int		ret;
   4519
   4520	if (xhci->quirks & XHCI_HW_LPM_DISABLE)
   4521		return -EPERM;
   4522
   4523	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
   4524			!udev->lpm_capable)
   4525		return -EPERM;
   4526
   4527	if (!udev->parent || udev->parent->parent ||
   4528			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
   4529		return -EPERM;
   4530
   4531	if (udev->usb2_hw_lpm_capable != 1)
   4532		return -EPERM;
   4533
   4534	spin_lock_irqsave(&xhci->lock, flags);
   4535
   4536	ports = xhci->usb2_rhub.ports;
   4537	port_num = udev->portnum - 1;
   4538	pm_addr = ports[port_num]->addr + PORTPMSC;
   4539	pm_val = readl(pm_addr);
   4540	hlpm_addr = ports[port_num]->addr + PORTHLPMC;
   4541
   4542	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
   4543			enable ? "enable" : "disable", port_num + 1);
   4544
   4545	if (enable) {
   4546		/* Host supports BESL timeout instead of HIRD */
   4547		if (udev->usb2_hw_lpm_besl_capable) {
   4548			/* if device doesn't have a preferred BESL value use a
   4549			 * default one which works with mixed HIRD and BESL
   4550			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
   4551			 */
   4552			field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
   4553			if ((field & USB_BESL_SUPPORT) &&
   4554			    (field & USB_BESL_BASELINE_VALID))
   4555				hird = USB_GET_BESL_BASELINE(field);
   4556			else
   4557				hird = udev->l1_params.besl;
   4558
   4559			exit_latency = xhci_besl_encoding[hird];
   4560			spin_unlock_irqrestore(&xhci->lock, flags);
   4561
   4562			ret = xhci_change_max_exit_latency(xhci, udev,
   4563							   exit_latency);
   4564			if (ret < 0)
   4565				return ret;
   4566			spin_lock_irqsave(&xhci->lock, flags);
   4567
   4568			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
   4569			writel(hlpm_val, hlpm_addr);
   4570			/* flush write */
   4571			readl(hlpm_addr);
   4572		} else {
   4573			hird = xhci_calculate_hird_besl(xhci, udev);
   4574		}
   4575
   4576		pm_val &= ~PORT_HIRD_MASK;
   4577		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
   4578		writel(pm_val, pm_addr);
   4579		pm_val = readl(pm_addr);
   4580		pm_val |= PORT_HLE;
   4581		writel(pm_val, pm_addr);
   4582		/* flush write */
   4583		readl(pm_addr);
   4584	} else {
   4585		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
   4586		writel(pm_val, pm_addr);
   4587		/* flush write */
   4588		readl(pm_addr);
   4589		if (udev->usb2_hw_lpm_besl_capable) {
   4590			spin_unlock_irqrestore(&xhci->lock, flags);
   4591			xhci_change_max_exit_latency(xhci, udev, 0);
   4592			readl_poll_timeout(ports[port_num]->addr, pm_val,
   4593					   (pm_val & PORT_PLS_MASK) == XDEV_U0,
   4594					   100, 10000);
   4595			return 0;
   4596		}
   4597	}
   4598
   4599	spin_unlock_irqrestore(&xhci->lock, flags);
   4600	return 0;
   4601}
   4602
   4603/* Check if a USB2 port supports a given extended capability protocol.
   4604 * Only USB2 ports' extended protocol capability values are cached.
   4605 * Return 1 if the capability is supported.
   4606 */
   4607static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
   4608					   unsigned capability)
   4609{
   4610	u32 port_offset, port_count;
   4611	int i;
   4612
   4613	for (i = 0; i < xhci->num_ext_caps; i++) {
   4614		if (xhci->ext_caps[i] & capability) {
    4615			/* port offsets start at 1 */
   4616			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
   4617			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
   4618			if (port >= port_offset &&
   4619			    port < port_offset + port_count)
   4620				return 1;
   4621		}
   4622	}
   4623	return 0;
   4624}
   4625
   4626static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
   4627{
   4628	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
   4629	int		portnum = udev->portnum - 1;
   4630
   4631	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
   4632		return 0;
   4633
    4634	/* for now we only support LPM for non-hub devices connected to the root hub */
   4635	if (!udev->parent || udev->parent->parent ||
   4636			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
   4637		return 0;
   4638
   4639	if (xhci->hw_lpm_support == 1 &&
   4640			xhci_check_usb2_port_capability(
   4641				xhci, portnum, XHCI_HLC)) {
   4642		udev->usb2_hw_lpm_capable = 1;
   4643		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
   4644		udev->l1_params.besl = XHCI_DEFAULT_BESL;
   4645		if (xhci_check_usb2_port_capability(xhci, portnum,
   4646					XHCI_BLC))
   4647			udev->usb2_hw_lpm_besl_capable = 1;
   4648	}
   4649
   4650	return 0;
   4651}
   4652
   4653/*---------------------- USB 3.0 Link PM functions ------------------------*/
   4654
   4655/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
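        /* e.g. bInterval = 4 gives 2^3 * 125us = 1ms = 1,000,000ns (illustrative). */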
   4656static unsigned long long xhci_service_interval_to_ns(
   4657		struct usb_endpoint_descriptor *desc)
   4658{
   4659	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
   4660}
   4661
   4662static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
   4663		enum usb3_link_state state)
   4664{
   4665	unsigned long long sel;
   4666	unsigned long long pel;
   4667	unsigned int max_sel_pel;
   4668	char *state_name;
   4669
   4670	switch (state) {
   4671	case USB3_LPM_U1:
   4672		/* Convert SEL and PEL stored in nanoseconds to microseconds */
   4673		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
   4674		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
   4675		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
   4676		state_name = "U1";
   4677		break;
   4678	case USB3_LPM_U2:
   4679		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
   4680		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
   4681		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
   4682		state_name = "U2";
   4683		break;
   4684	default:
   4685		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
   4686				__func__);
   4687		return USB3_LPM_DISABLED;
   4688	}
   4689
   4690	if (sel <= max_sel_pel && pel <= max_sel_pel)
   4691		return USB3_LPM_DEVICE_INITIATED;
   4692
   4693	if (sel > max_sel_pel)
   4694		dev_dbg(&udev->dev, "Device-initiated %s disabled "
    4695				"due to long SEL %llu us\n",
   4696				state_name, sel);
   4697	else
   4698		dev_dbg(&udev->dev, "Device-initiated %s disabled "
    4699				"due to long PEL %llu us\n",
   4700				state_name, pel);
   4701	return USB3_LPM_DISABLED;
   4702}
   4703
   4704/* The U1 timeout should be the maximum of the following values:
   4705 *  - For control endpoints, U1 system exit latency (SEL) * 3
   4706 *  - For bulk endpoints, U1 SEL * 5
   4707 *  - For interrupt endpoints:
   4708 *    - Notification EPs, U1 SEL * 3
   4709 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
   4710 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
   4711 */
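        /* For example (illustrative SEL value): with the Intel calculation below, a
         * bulk endpoint on a device with a U1 SEL of 400ns gets 400 * 5 = 2000ns,
         * which xhci_calculate_u1_timeout() rounds up to a hub-encoded timeout of 2
         * (in 1us units).
         */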
   4712static unsigned long long xhci_calculate_intel_u1_timeout(
   4713		struct usb_device *udev,
   4714		struct usb_endpoint_descriptor *desc)
   4715{
   4716	unsigned long long timeout_ns;
   4717	int ep_type;
   4718	int intr_type;
   4719
   4720	ep_type = usb_endpoint_type(desc);
   4721	switch (ep_type) {
   4722	case USB_ENDPOINT_XFER_CONTROL:
   4723		timeout_ns = udev->u1_params.sel * 3;
   4724		break;
   4725	case USB_ENDPOINT_XFER_BULK:
   4726		timeout_ns = udev->u1_params.sel * 5;
   4727		break;
   4728	case USB_ENDPOINT_XFER_INT:
   4729		intr_type = usb_endpoint_interrupt_type(desc);
   4730		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
   4731			timeout_ns = udev->u1_params.sel * 3;
   4732			break;
   4733		}
   4734		/* Otherwise the calculation is the same as isoc eps */
   4735		fallthrough;
   4736	case USB_ENDPOINT_XFER_ISOC:
   4737		timeout_ns = xhci_service_interval_to_ns(desc);
   4738		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
   4739		if (timeout_ns < udev->u1_params.sel * 2)
   4740			timeout_ns = udev->u1_params.sel * 2;
   4741		break;
   4742	default:
   4743		return 0;
   4744	}
   4745
   4746	return timeout_ns;
   4747}
   4748
   4749/* Returns the hub-encoded U1 timeout value. */
   4750static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
   4751		struct usb_device *udev,
   4752		struct usb_endpoint_descriptor *desc)
   4753{
   4754	unsigned long long timeout_ns;
   4755
   4756	/* Prevent U1 if service interval is shorter than U1 exit latency */
   4757	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
   4758		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
   4759			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
   4760			return USB3_LPM_DISABLED;
   4761		}
   4762	}
   4763
   4764	if (xhci->quirks & XHCI_INTEL_HOST)
   4765		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
   4766	else
   4767		timeout_ns = udev->u1_params.sel;
   4768
   4769	/* The U1 timeout is encoded in 1us intervals.
   4770	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
   4771	 */
   4772	if (timeout_ns == USB3_LPM_DISABLED)
   4773		timeout_ns = 1;
   4774	else
   4775		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
   4776
   4777	/* If the necessary timeout value is bigger than what we can set in the
   4778	 * USB 3.0 hub, we have to disable hub-initiated U1.
   4779	 */
   4780	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
   4781		return timeout_ns;
   4782	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
    4783			"due to long timeout %llu us\n", timeout_ns);
   4784	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
   4785}
   4786
   4787/* The U2 timeout should be the maximum of:
   4788 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
   4789 *  - largest bInterval of any active periodic endpoint (to avoid going
   4790 *    into lower power link states between intervals).
   4791 *  - the U2 Exit Latency of the device
   4792 */
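        /* For example (illustrative values): with the Intel calculation below, an
         * interrupt endpoint with a 4ms service interval on a device with a 2ms U2
         * exit latency keeps the 10ms floor, which xhci_calculate_u2_timeout() then
         * encodes as DIV_ROUND_UP(10,000,000ns, 256,000ns) = 40.
         */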
   4793static unsigned long long xhci_calculate_intel_u2_timeout(
   4794		struct usb_device *udev,
   4795		struct usb_endpoint_descriptor *desc)
   4796{
   4797	unsigned long long timeout_ns;
   4798	unsigned long long u2_del_ns;
   4799
   4800	timeout_ns = 10 * 1000 * 1000;
   4801
   4802	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
   4803			(xhci_service_interval_to_ns(desc) > timeout_ns))
   4804		timeout_ns = xhci_service_interval_to_ns(desc);
   4805
   4806	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
   4807	if (u2_del_ns > timeout_ns)
   4808		timeout_ns = u2_del_ns;
   4809
   4810	return timeout_ns;
   4811}
   4812
   4813/* Returns the hub-encoded U2 timeout value. */
   4814static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
   4815		struct usb_device *udev,
   4816		struct usb_endpoint_descriptor *desc)
   4817{
   4818	unsigned long long timeout_ns;
   4819
   4820	/* Prevent U2 if service interval is shorter than U2 exit latency */
   4821	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
   4822		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
   4823			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
   4824			return USB3_LPM_DISABLED;
   4825		}
   4826	}
   4827
   4828	if (xhci->quirks & XHCI_INTEL_HOST)
   4829		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
   4830	else
   4831		timeout_ns = udev->u2_params.sel;
   4832
   4833	/* The U2 timeout is encoded in 256us intervals */
   4834	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
   4835	/* If the necessary timeout value is bigger than what we can set in the
   4836	 * USB 3.0 hub, we have to disable hub-initiated U2.
   4837	 */
   4838	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
   4839		return timeout_ns;
   4840	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
    4841			"due to long timeout %llu (units of 256us)\n", timeout_ns);
   4842	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
   4843}
   4844
   4845static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
   4846		struct usb_device *udev,
   4847		struct usb_endpoint_descriptor *desc,
   4848		enum usb3_link_state state,
   4849		u16 *timeout)
   4850{
   4851	if (state == USB3_LPM_U1)
   4852		return xhci_calculate_u1_timeout(xhci, udev, desc);
   4853	else if (state == USB3_LPM_U2)
   4854		return xhci_calculate_u2_timeout(xhci, udev, desc);
   4855
   4856	return USB3_LPM_DISABLED;
   4857}
   4858
   4859static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
   4860		struct usb_device *udev,
   4861		struct usb_endpoint_descriptor *desc,
   4862		enum usb3_link_state state,
   4863		u16 *timeout)
   4864{
   4865	u16 alt_timeout;
   4866
   4867	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
   4868		desc, state, timeout);
   4869
   4870	/* If we found we can't enable hub-initiated LPM, and
   4871	 * the U1 or U2 exit latency was too high to allow
   4872	 * device-initiated LPM as well, then we will disable LPM
   4873	 * for this device, so stop searching any further.
   4874	 */
   4875	if (alt_timeout == USB3_LPM_DISABLED) {
   4876		*timeout = alt_timeout;
   4877		return -E2BIG;
   4878	}
   4879	if (alt_timeout > *timeout)
   4880		*timeout = alt_timeout;
   4881	return 0;
   4882}
   4883
   4884static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
   4885		struct usb_device *udev,
   4886		struct usb_host_interface *alt,
   4887		enum usb3_link_state state,
   4888		u16 *timeout)
   4889{
   4890	int j;
   4891
   4892	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
   4893		if (xhci_update_timeout_for_endpoint(xhci, udev,
   4894					&alt->endpoint[j].desc, state, timeout))
   4895			return -E2BIG;
   4896	}
   4897	return 0;
   4898}
   4899
   4900static int xhci_check_intel_tier_policy(struct usb_device *udev,
   4901		enum usb3_link_state state)
   4902{
   4903	struct usb_device *parent;
   4904	unsigned int num_hubs;
   4905
   4906	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
   4907	for (parent = udev->parent, num_hubs = 0; parent->parent;
   4908			parent = parent->parent)
   4909		num_hubs++;
   4910
   4911	if (num_hubs < 2)
   4912		return 0;
   4913
   4914	dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
   4915			" below second-tier hub.\n");
   4916	dev_dbg(&udev->dev, "Plug device into first-tier hub "
   4917			"to decrease power consumption.\n");
   4918	return -E2BIG;
   4919}
   4920
   4921static int xhci_check_tier_policy(struct xhci_hcd *xhci,
   4922		struct usb_device *udev,
   4923		enum usb3_link_state state)
   4924{
   4925	if (xhci->quirks & XHCI_INTEL_HOST)
   4926		return xhci_check_intel_tier_policy(udev, state);
   4927	else
   4928		return 0;
   4929}
   4930
   4931/* Returns the U1 or U2 timeout that should be enabled.
   4932 * If the tier check or timeout setting functions return with a non-zero exit
   4933 * code, that means the timeout value has been finalized and we shouldn't look
   4934 * at any more endpoints.
   4935 */
   4936static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
   4937			struct usb_device *udev, enum usb3_link_state state)
   4938{
   4939	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   4940	struct usb_host_config *config;
   4941	char *state_name;
   4942	int i;
   4943	u16 timeout = USB3_LPM_DISABLED;
   4944
   4945	if (state == USB3_LPM_U1)
   4946		state_name = "U1";
   4947	else if (state == USB3_LPM_U2)
   4948		state_name = "U2";
   4949	else {
   4950		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
   4951				state);
   4952		return timeout;
   4953	}
   4954
   4955	/* Gather some information about the currently installed configuration
   4956	 * and alternate interface settings.
   4957	 */
   4958	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
   4959			state, &timeout))
   4960		return timeout;
   4961
   4962	config = udev->actconfig;
   4963	if (!config)
   4964		return timeout;
   4965
   4966	for (i = 0; i < config->desc.bNumInterfaces; i++) {
   4967		struct usb_driver *driver;
   4968		struct usb_interface *intf = config->interface[i];
   4969
   4970		if (!intf)
   4971			continue;
   4972
   4973		/* Check if any currently bound drivers want hub-initiated LPM
   4974		 * disabled.
   4975		 */
   4976		if (intf->dev.driver) {
   4977			driver = to_usb_driver(intf->dev.driver);
   4978			if (driver && driver->disable_hub_initiated_lpm) {
   4979				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
   4980					state_name, driver->name);
   4981				timeout = xhci_get_timeout_no_hub_lpm(udev,
   4982								      state);
   4983				if (timeout == USB3_LPM_DISABLED)
   4984					return timeout;
   4985			}
   4986		}
   4987
   4988		/* Not sure how this could happen... */
   4989		if (!intf->cur_altsetting)
   4990			continue;
   4991
   4992		if (xhci_update_timeout_for_interface(xhci, udev,
   4993					intf->cur_altsetting,
   4994					state, &timeout))
   4995			return timeout;
   4996	}
   4997	return timeout;
   4998}
   4999
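        /* A worked example (illustrative values): with U1 already enabled and
         * u1_params.mel = 1000ns, enabling U2 with u2_params.mel = 2300ns yields
         * max(DIV_ROUND_UP(1000, 1000), DIV_ROUND_UP(2300, 1000)) = 3us.
         */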
   5000static int calculate_max_exit_latency(struct usb_device *udev,
   5001		enum usb3_link_state state_changed,
   5002		u16 hub_encoded_timeout)
   5003{
   5004	unsigned long long u1_mel_us = 0;
   5005	unsigned long long u2_mel_us = 0;
   5006	unsigned long long mel_us = 0;
   5007	bool disabling_u1;
   5008	bool disabling_u2;
   5009	bool enabling_u1;
   5010	bool enabling_u2;
   5011
   5012	disabling_u1 = (state_changed == USB3_LPM_U1 &&
   5013			hub_encoded_timeout == USB3_LPM_DISABLED);
   5014	disabling_u2 = (state_changed == USB3_LPM_U2 &&
   5015			hub_encoded_timeout == USB3_LPM_DISABLED);
   5016
   5017	enabling_u1 = (state_changed == USB3_LPM_U1 &&
   5018			hub_encoded_timeout != USB3_LPM_DISABLED);
   5019	enabling_u2 = (state_changed == USB3_LPM_U2 &&
   5020			hub_encoded_timeout != USB3_LPM_DISABLED);
   5021
   5022	/* If U1 was already enabled and we're not disabling it,
   5023	 * or we're going to enable U1, account for the U1 max exit latency.
   5024	 */
   5025	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
   5026			enabling_u1)
   5027		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
   5028	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
   5029			enabling_u2)
   5030		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
   5031
   5032	mel_us = max(u1_mel_us, u2_mel_us);
   5033
   5034	/* xHCI host controller max exit latency field is only 16 bits wide. */
   5035	if (mel_us > MAX_EXIT) {
   5036		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
   5037				"is too big.\n", mel_us);
   5038		return -E2BIG;
   5039	}
   5040	return mel_us;
   5041}
   5042
   5043/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
   5044static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
   5045			struct usb_device *udev, enum usb3_link_state state)
   5046{
   5047	struct xhci_hcd	*xhci;
   5048	u16 hub_encoded_timeout;
   5049	int mel;
   5050	int ret;
   5051
   5052	xhci = hcd_to_xhci(hcd);
   5053	/* The LPM timeout values are pretty host-controller specific, so don't
   5054	 * enable hub-initiated timeouts unless the vendor has provided
   5055	 * information about their timeout algorithm.
   5056	 */
   5057	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
   5058			!xhci->devs[udev->slot_id])
   5059		return USB3_LPM_DISABLED;
   5060
   5061	if (xhci_check_tier_policy(xhci, udev, state) < 0)
   5062		return USB3_LPM_DISABLED;
   5063
   5064	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
   5065	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
   5066	if (mel < 0) {
   5067		/* Max Exit Latency is too big, disable LPM. */
   5068		hub_encoded_timeout = USB3_LPM_DISABLED;
   5069		mel = 0;
   5070	}
   5071
   5072	ret = xhci_change_max_exit_latency(xhci, udev, mel);
   5073	if (ret)
   5074		return ret;
   5075	return hub_encoded_timeout;
   5076}
   5077
   5078static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
   5079			struct usb_device *udev, enum usb3_link_state state)
   5080{
   5081	struct xhci_hcd	*xhci;
   5082	u16 mel;
   5083
   5084	xhci = hcd_to_xhci(hcd);
   5085	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
   5086			!xhci->devs[udev->slot_id])
   5087		return 0;
   5088
   5089	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
   5090	return xhci_change_max_exit_latency(xhci, udev, mel);
   5091}
   5092#else /* CONFIG_PM */
   5093
   5094static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
   5095				struct usb_device *udev, int enable)
   5096{
   5097	return 0;
   5098}
   5099
   5100static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
   5101{
   5102	return 0;
   5103}
   5104
   5105static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
   5106			struct usb_device *udev, enum usb3_link_state state)
   5107{
   5108	return USB3_LPM_DISABLED;
   5109}
   5110
   5111static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
   5112			struct usb_device *udev, enum usb3_link_state state)
   5113{
   5114	return 0;
   5115}
   5116#endif	/* CONFIG_PM */
   5117
   5118/*-------------------------------------------------------------------------*/
   5119
   5120/* Once a hub descriptor is fetched for a device, we need to update the xHC's
   5121 * internal data structures for the device.
   5122 */
   5123static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
   5124			struct usb_tt *tt, gfp_t mem_flags)
   5125{
   5126	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   5127	struct xhci_virt_device *vdev;
   5128	struct xhci_command *config_cmd;
   5129	struct xhci_input_control_ctx *ctrl_ctx;
   5130	struct xhci_slot_ctx *slot_ctx;
   5131	unsigned long flags;
   5132	unsigned think_time;
   5133	int ret;
   5134
   5135	/* Ignore root hubs */
   5136	if (!hdev->parent)
   5137		return 0;
   5138
   5139	vdev = xhci->devs[hdev->slot_id];
   5140	if (!vdev) {
   5141		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
   5142		return -EINVAL;
   5143	}
   5144
   5145	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
   5146	if (!config_cmd)
   5147		return -ENOMEM;
   5148
   5149	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
   5150	if (!ctrl_ctx) {
   5151		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
   5152				__func__);
   5153		xhci_free_command(xhci, config_cmd);
   5154		return -ENOMEM;
   5155	}
   5156
   5157	spin_lock_irqsave(&xhci->lock, flags);
   5158	if (hdev->speed == USB_SPEED_HIGH &&
   5159			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
   5160		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
   5161		xhci_free_command(xhci, config_cmd);
   5162		spin_unlock_irqrestore(&xhci->lock, flags);
   5163		return -ENOMEM;
   5164	}
   5165
   5166	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
   5167	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
   5168	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
   5169	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
   5170	/*
    5171	 * Refer to section 6.2.2: MTT should be 0 for a full-speed hub,
    5172	 * but it may already be set to 1 when setting up an xHCI virtual
   5173	 * device, so clear it anyway.
   5174	 */
   5175	if (tt->multi)
   5176		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
   5177	else if (hdev->speed == USB_SPEED_FULL)
   5178		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
   5179
   5180	if (xhci->hci_version > 0x95) {
   5181		xhci_dbg(xhci, "xHCI version %x needs hub "
   5182				"TT think time and number of ports\n",
   5183				(unsigned int) xhci->hci_version);
   5184		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
   5185		/* Set TT think time - convert from ns to FS bit times.
   5186		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
   5187		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
   5188		 *
   5189		 * xHCI 1.0: this field shall be 0 if the device is not a
    5190		 * High-speed hub.
   5191		 */
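        		/* e.g. a think time of 1332ns (16 FS bit times) encodes to
        		 * (1332 / 666) - 1 = 1 (illustrative value).
        		 */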
   5192		think_time = tt->think_time;
   5193		if (think_time != 0)
   5194			think_time = (think_time / 666) - 1;
   5195		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
   5196			slot_ctx->tt_info |=
   5197				cpu_to_le32(TT_THINK_TIME(think_time));
   5198	} else {
   5199		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
   5200				"TT think time or number of ports\n",
   5201				(unsigned int) xhci->hci_version);
   5202	}
   5203	slot_ctx->dev_state = 0;
   5204	spin_unlock_irqrestore(&xhci->lock, flags);
   5205
   5206	xhci_dbg(xhci, "Set up %s for hub device.\n",
   5207			(xhci->hci_version > 0x95) ?
   5208			"configure endpoint" : "evaluate context");
   5209
   5210	/* Issue and wait for the configure endpoint or
   5211	 * evaluate context command.
   5212	 */
   5213	if (xhci->hci_version > 0x95)
   5214		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
   5215				false, false);
   5216	else
   5217		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
   5218				true, false);
   5219
   5220	xhci_free_command(xhci, config_cmd);
   5221	return ret;
   5222}
   5223
   5224static int xhci_get_frame(struct usb_hcd *hcd)
   5225{
   5226	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
   5227	/* EHCI mods by the periodic size.  Why? */
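        	/* MFINDEX counts 125us microframes; shift right by 3 for the 1ms frame number. */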
   5228	return readl(&xhci->run_regs->microframe_index) >> 3;
   5229}
   5230
   5231static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
   5232{
   5233	xhci->usb2_rhub.hcd = hcd;
   5234	hcd->speed = HCD_USB2;
   5235	hcd->self.root_hub->speed = USB_SPEED_HIGH;
   5236	/*
    5237	 * A USB 2.0 roothub under xHCI has an integrated TT
    5238	 * (rate matching hub), as opposed to having an OHCI/UHCI
   5239	 * companion controller.
   5240	 */
   5241	hcd->has_tt = 1;
   5242}
   5243
   5244static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
   5245{
   5246	unsigned int minor_rev;
   5247
   5248	/*
   5249	 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
   5250	 * should return 0x31 for sbrn, or that the minor revision
    5251	 * is a two-digit BCD containing minor and sub-minor numbers.
   5252	 * This was later clarified in xHCI 1.2.
   5253	 *
   5254	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
   5255	 * minor revision set to 0x1 instead of 0x10.
   5256	 */
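        	/* e.g. a host reporting min_rev 0x20 (illustrative value) takes the
        	 * "/ 0x10" path below and registers its root hub as USB 3.2 (Gen 2x2).
        	 */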
   5257	if (xhci->usb3_rhub.min_rev == 0x1)
   5258		minor_rev = 1;
   5259	else
   5260		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
   5261
   5262	switch (minor_rev) {
   5263	case 2:
   5264		hcd->speed = HCD_USB32;
   5265		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
   5266		hcd->self.root_hub->rx_lanes = 2;
   5267		hcd->self.root_hub->tx_lanes = 2;
   5268		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
   5269		break;
   5270	case 1:
   5271		hcd->speed = HCD_USB31;
   5272		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
   5273		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
   5274		break;
   5275	}
   5276	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
   5277		  minor_rev, minor_rev ? "Enhanced " : "");
   5278
   5279	xhci->usb3_rhub.hcd = hcd;
   5280}
   5281
   5282int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
   5283{
   5284	struct xhci_hcd		*xhci;
   5285	/*
   5286	 * TODO: Check with DWC3 clients for sysdev according to
   5287	 * quirks
   5288	 */
   5289	struct device		*dev = hcd->self.sysdev;
   5290	int			retval;
   5291
   5292	/* Accept arbitrarily long scatter-gather lists */
   5293	hcd->self.sg_tablesize = ~0;
   5294
    5295	/* support building packets from discontinuous buffers */
   5296	hcd->self.no_sg_constraint = 1;
   5297
   5298	/* XHCI controllers don't stop the ep queue on short packets :| */
   5299	hcd->self.no_stop_on_short = 1;
   5300
   5301	xhci = hcd_to_xhci(hcd);
   5302
   5303	if (!usb_hcd_is_primary_hcd(hcd)) {
   5304		xhci_hcd_init_usb3_data(xhci, hcd);
   5305		return 0;
   5306	}
   5307
   5308	mutex_init(&xhci->mutex);
   5309	xhci->main_hcd = hcd;
   5310	xhci->cap_regs = hcd->regs;
   5311	xhci->op_regs = hcd->regs +
   5312		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
   5313	xhci->run_regs = hcd->regs +
   5314		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
   5315	/* Cache read-only capability registers */
   5316	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
   5317	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
   5318	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
   5319	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
   5320	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
   5321	if (xhci->hci_version > 0x100)
   5322		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
   5323
   5324	xhci->quirks |= quirks;
   5325
   5326	get_quirks(dev, xhci);
   5327
    5328	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
    5329	 * success event after a short transfer. This quirk makes the driver
    5330	 * ignore such spurious events.
   5331	 */
   5332	if (xhci->hci_version > 0x96)
   5333		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
   5334
   5335	/* Make sure the HC is halted. */
   5336	retval = xhci_halt(xhci);
   5337	if (retval)
   5338		return retval;
   5339
   5340	xhci_zero_64b_regs(xhci);
   5341
   5342	xhci_dbg(xhci, "Resetting HCD\n");
   5343	/* Reset the internal HC memory state and registers. */
   5344	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
   5345	if (retval)
   5346		return retval;
   5347	xhci_dbg(xhci, "Reset complete\n");
   5348
   5349	/*
   5350	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
    5351	 * of HCCPARAMS1 is set to 1. However, such xHCs don't actually support
    5352	 * 64-bit address memory pointers. So this driver clears the AC64
    5353	 * bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
    5354	 * DMA_BIT_MASK(32)) is called in this xhci_gen_setup().
   5355	 */
   5356	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
   5357		xhci->hcc_params &= ~BIT(0);
   5358
   5359	/* Set dma_mask and coherent_dma_mask to 64-bits,
   5360	 * if xHC supports 64-bit addressing */
   5361	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
   5362			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
   5363		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
   5364		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
   5365	} else {
   5366		/*
   5367		 * This is to avoid error in cases where a 32-bit USB
   5368		 * controller is used on a 64-bit capable system.
   5369		 */
   5370		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
   5371		if (retval)
   5372			return retval;
   5373		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
   5374		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
   5375	}
   5376
   5377	xhci_dbg(xhci, "Calling HCD init\n");
   5378	/* Initialize HCD and host controller data structures. */
   5379	retval = xhci_init(hcd);
   5380	if (retval)
   5381		return retval;
   5382	xhci_dbg(xhci, "Called HCD init\n");
   5383
   5384	if (xhci_hcd_is_usb3(hcd))
   5385		xhci_hcd_init_usb3_data(xhci, hcd);
   5386	else
   5387		xhci_hcd_init_usb2_data(xhci, hcd);
   5388
   5389	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
   5390		  xhci->hcc_params, xhci->hci_version, xhci->quirks);
   5391
   5392	return 0;
   5393}
   5394EXPORT_SYMBOL_GPL(xhci_gen_setup);
   5395
   5396static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
   5397		struct usb_host_endpoint *ep)
   5398{
   5399	struct xhci_hcd *xhci;
   5400	struct usb_device *udev;
   5401	unsigned int slot_id;
   5402	unsigned int ep_index;
   5403	unsigned long flags;
   5404
   5405	xhci = hcd_to_xhci(hcd);
   5406
   5407	spin_lock_irqsave(&xhci->lock, flags);
   5408	udev = (struct usb_device *)ep->hcpriv;
   5409	slot_id = udev->slot_id;
   5410	ep_index = xhci_get_endpoint_index(&ep->desc);
   5411
   5412	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
   5413	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
   5414	spin_unlock_irqrestore(&xhci->lock, flags);
   5415}
   5416
   5417static const struct hc_driver xhci_hc_driver = {
   5418	.description =		"xhci-hcd",
   5419	.product_desc =		"xHCI Host Controller",
   5420	.hcd_priv_size =	sizeof(struct xhci_hcd),
   5421
   5422	/*
   5423	 * generic hardware linkage
   5424	 */
   5425	.irq =			xhci_irq,
   5426	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
   5427				HCD_BH,
   5428
   5429	/*
   5430	 * basic lifecycle operations
   5431	 */
   5432	.reset =		NULL, /* set in xhci_init_driver() */
   5433	.start =		xhci_run,
   5434	.stop =			xhci_stop,
   5435	.shutdown =		xhci_shutdown,
   5436
   5437	/*
   5438	 * managing i/o requests and associated device resources
   5439	 */
   5440	.map_urb_for_dma =      xhci_map_urb_for_dma,
   5441	.unmap_urb_for_dma =    xhci_unmap_urb_for_dma,
   5442	.urb_enqueue =		xhci_urb_enqueue,
   5443	.urb_dequeue =		xhci_urb_dequeue,
   5444	.alloc_dev =		xhci_alloc_dev,
   5445	.free_dev =		xhci_free_dev,
   5446	.alloc_streams =	xhci_alloc_streams,
   5447	.free_streams =		xhci_free_streams,
   5448	.add_endpoint =		xhci_add_endpoint,
   5449	.drop_endpoint =	xhci_drop_endpoint,
   5450	.endpoint_disable =	xhci_endpoint_disable,
   5451	.endpoint_reset =	xhci_endpoint_reset,
   5452	.check_bandwidth =	xhci_check_bandwidth,
   5453	.reset_bandwidth =	xhci_reset_bandwidth,
   5454	.address_device =	xhci_address_device,
   5455	.enable_device =	xhci_enable_device,
   5456	.update_hub_device =	xhci_update_hub_device,
   5457	.reset_device =		xhci_discover_or_reset_device,
   5458
   5459	/*
   5460	 * scheduling support
   5461	 */
   5462	.get_frame_number =	xhci_get_frame,
   5463
   5464	/*
   5465	 * root hub support
   5466	 */
   5467	.hub_control =		xhci_hub_control,
   5468	.hub_status_data =	xhci_hub_status_data,
   5469	.bus_suspend =		xhci_bus_suspend,
   5470	.bus_resume =		xhci_bus_resume,
   5471	.get_resuming_ports =	xhci_get_resuming_ports,
   5472
   5473	/*
   5474	 * call back when device connected and addressed
   5475	 */
   5476	.update_device =        xhci_update_device,
   5477	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
   5478	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
   5479	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
   5480	.find_raw_port_number =	xhci_find_raw_port_number,
   5481	.clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
   5482};
   5483
   5484void xhci_init_driver(struct hc_driver *drv,
   5485		      const struct xhci_driver_overrides *over)
   5486{
   5487	BUG_ON(!over);
   5488
   5489	/* Copy the generic table to drv then apply the overrides */
   5490	*drv = xhci_hc_driver;
   5491
   5492	if (over) {
   5493		drv->hcd_priv_size += over->extra_priv_size;
   5494		if (over->reset)
   5495			drv->reset = over->reset;
   5496		if (over->start)
   5497			drv->start = over->start;
   5498		if (over->add_endpoint)
   5499			drv->add_endpoint = over->add_endpoint;
   5500		if (over->drop_endpoint)
   5501			drv->drop_endpoint = over->drop_endpoint;
   5502		if (over->check_bandwidth)
   5503			drv->check_bandwidth = over->check_bandwidth;
   5504		if (over->reset_bandwidth)
   5505			drv->reset_bandwidth = over->reset_bandwidth;
   5506	}
   5507}
   5508EXPORT_SYMBOL_GPL(xhci_init_driver);
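        /* Hypothetical usage sketch (identifiers below are illustrative and not part
         * of this file): a glue driver fills in an xhci_driver_overrides and passes
         * it here, e.g.
         *
         *	static const struct xhci_driver_overrides my_overrides = {
         *		.extra_priv_size = sizeof(struct my_priv),
         *		.reset = my_setup,
         *	};
         *	...
         *	xhci_init_driver(&my_hc_driver, &my_overrides);
         */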
   5509
   5510MODULE_DESCRIPTION(DRIVER_DESC);
   5511MODULE_AUTHOR(DRIVER_AUTHOR);
   5512MODULE_LICENSE("GPL");
   5513
   5514static int __init xhci_hcd_init(void)
   5515{
   5516	/*
   5517	 * Check the compiler generated sizes of structures that must be laid
   5518	 * out in specific ways for hardware access.
   5519	 */
   5520	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
   5521	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
   5522	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
   5523	/* xhci_device_control has eight fields, and also
   5524	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
   5525	 */
   5526	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
   5527	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
   5528	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
   5529	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
   5530	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
   5531	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
   5532	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
   5533
   5534	if (usb_disabled())
   5535		return -ENODEV;
   5536
   5537	xhci_debugfs_create_root();
   5538	xhci_dbc_init();
   5539
   5540	return 0;
   5541}
   5542
   5543/*
   5544 * If an init function is provided, an exit function must also be provided
   5545 * to allow module unload.
   5546 */
   5547static void __exit xhci_hcd_fini(void)
   5548{
   5549	xhci_debugfs_remove_root();
   5550	xhci_dbc_exit();
   5551}
   5552
   5553module_init(xhci_hcd_init);
   5554module_exit(xhci_hcd_fini);