cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

musb_core.c (81036B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * MUSB OTG driver core code
      4 *
      5 * Copyright 2005 Mentor Graphics Corporation
      6 * Copyright (C) 2005-2006 by Texas Instruments
      7 * Copyright (C) 2006-2007 Nokia Corporation
      8 */
      9
     10/*
     11 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
     12 *
     13 * This consists of a Host Controller Driver (HCD) and a peripheral
     14 * controller driver implementing the "Gadget" API; OTG support is
     15 * in the works.  These are normal Linux-USB controller drivers which
     16 * use IRQs and have no dedicated thread.
     17 *
     18 * This version of the driver has only been used with products from
     19 * Texas Instruments.  Those products integrate the Inventra logic
     20 * with other DMA, IRQ, and bus modules, as well as other logic that
     21 * needs to be reflected in this driver.
     22 *
     23 *
     24 * NOTE:  the original Mentor code here was pretty much a collection
     25 * of mechanisms that don't seem to have been fully integrated/working
      26 * for *any* Linux kernel version.  This version targets current Linux
      27 * kernels.  Key open issues include:
     28 *
     29 *  - Lack of host-side transaction scheduling, for all transfer types.
     30 *    The hardware doesn't do it; instead, software must.
     31 *
     32 *    This is not an issue for OTG devices that don't support external
     33 *    hubs, but for more "normal" USB hosts it's a user issue that the
     34 *    "multipoint" support doesn't scale in the expected ways.  That
     35 *    includes DaVinci EVM in a common non-OTG mode.
     36 *
     37 *      * Control and bulk use dedicated endpoints, and there's as
     38 *        yet no mechanism to either (a) reclaim the hardware when
     39 *        peripherals are NAKing, which gets complicated with bulk
     40 *        endpoints, or (b) use more than a single bulk endpoint in
     41 *        each direction.
     42 *
     43 *        RESULT:  one device may be perceived as blocking another one.
     44 *
     45 *      * Interrupt and isochronous will dynamically allocate endpoint
     46 *        hardware, but (a) there's no record keeping for bandwidth;
     47 *        (b) in the common case that few endpoints are available, there
     48 *        is no mechanism to reuse endpoints to talk to multiple devices.
     49 *
     50 *        RESULT:  At one extreme, bandwidth can be overcommitted in
      51 *        some hardware configurations, and no faults will be reported.
     52 *        At the other extreme, the bandwidth capabilities which do
     53 *        exist tend to be severely undercommitted.  You can't yet hook
     54 *        up both a keyboard and a mouse to an external USB hub.
     55 */
     56
     57/*
     58 * This gets many kinds of configuration information:
     59 *	- Kconfig for everything user-configurable
     60 *	- platform_device for addressing, irq, and platform_data
     61 *	- platform_data is mostly for board-specific information
      62 *	  (plus recently, SOC or family details)
     63 *
     64 * Most of the conditional compilation will (someday) vanish.
     65 */
     66
     67#include <linux/module.h>
     68#include <linux/kernel.h>
     69#include <linux/sched.h>
     70#include <linux/slab.h>
     71#include <linux/list.h>
     72#include <linux/kobject.h>
     73#include <linux/prefetch.h>
     74#include <linux/platform_device.h>
     75#include <linux/io.h>
     76#include <linux/iopoll.h>
     77#include <linux/dma-mapping.h>
     78#include <linux/usb.h>
     79#include <linux/usb/of.h>
     80
     81#include "musb_core.h"
     82#include "musb_trace.h"
     83
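/* Effective A-device wait-for-B-connect timeout: the configured
 * a_wait_bcon value, clamped to at least OTG_TIME_A_WAIT_BCON.
 */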
     84#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
     85
     86
     87#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
     88#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
     89
     90#define MUSB_VERSION "6.0"
     91
     92#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
     93
     94#define MUSB_DRIVER_NAME "musb-hdrc"
     95const char musb_driver_name[] = MUSB_DRIVER_NAME;
     96
     97MODULE_DESCRIPTION(DRIVER_INFO);
     98MODULE_AUTHOR(DRIVER_AUTHOR);
     99MODULE_LICENSE("GPL");
    100MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
    101
    102
    103/*-------------------------------------------------------------------------*/
    104
    105static inline struct musb *dev_to_musb(struct device *dev)
    106{
    107	return dev_get_drvdata(dev);
    108}
    109
    110enum musb_mode musb_get_mode(struct device *dev)
    111{
    112	enum usb_dr_mode mode;
    113
    114	mode = usb_get_dr_mode(dev);
    115	switch (mode) {
    116	case USB_DR_MODE_HOST:
    117		return MUSB_HOST;
    118	case USB_DR_MODE_PERIPHERAL:
    119		return MUSB_PERIPHERAL;
    120	case USB_DR_MODE_OTG:
    121	case USB_DR_MODE_UNKNOWN:
    122	default:
    123		return MUSB_OTG;
    124	}
    125}
    126EXPORT_SYMBOL_GPL(musb_get_mode);
    127
    128/*-------------------------------------------------------------------------*/
    129
    130static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
    131{
    132	void __iomem *addr = phy->io_priv;
    133	int	i = 0;
    134	u8	r;
    135	u8	power;
    136	int	ret;
    137
    138	pm_runtime_get_sync(phy->io_dev);
    139
    140	/* Make sure the transceiver is not in low power mode */
    141	power = musb_readb(addr, MUSB_POWER);
    142	power &= ~MUSB_POWER_SUSPENDM;
    143	musb_writeb(addr, MUSB_POWER, power);
    144
    145	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
    146	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
    147	 */
    148
    149	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
    150	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
    151			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
    152
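	/* Busy-wait for the ULPI register access to complete; bail out
	 * with -ETIMEDOUT after 10000 polls.
	 */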
    153	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
    154				& MUSB_ULPI_REG_CMPLT)) {
    155		i++;
    156		if (i == 10000) {
    157			ret = -ETIMEDOUT;
    158			goto out;
    159		}
    160
    161	}
    162	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
    163	r &= ~MUSB_ULPI_REG_CMPLT;
    164	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
    165
    166	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
    167
    168out:
    169	pm_runtime_put(phy->io_dev);
    170
    171	return ret;
    172}
    173
    174static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
    175{
    176	void __iomem *addr = phy->io_priv;
    177	int	i = 0;
    178	u8	r = 0;
    179	u8	power;
    180	int	ret = 0;
    181
    182	pm_runtime_get_sync(phy->io_dev);
    183
    184	/* Make sure the transceiver is not in low power mode */
    185	power = musb_readb(addr, MUSB_POWER);
    186	power &= ~MUSB_POWER_SUSPENDM;
    187	musb_writeb(addr, MUSB_POWER, power);
    188
    189	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
    190	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
    191	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
    192
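	/* Same completion polling as musb_ulpi_read(): give up after 10000 reads. */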
    193	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
    194				& MUSB_ULPI_REG_CMPLT)) {
    195		i++;
    196		if (i == 10000) {
    197			ret = -ETIMEDOUT;
    198			goto out;
    199		}
    200	}
    201
    202	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
    203	r &= ~MUSB_ULPI_REG_CMPLT;
    204	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
    205
    206out:
    207	pm_runtime_put(phy->io_dev);
    208
    209	return ret;
    210}
    211
    212static struct usb_phy_io_ops musb_ulpi_access = {
    213	.read = musb_ulpi_read,
    214	.write = musb_ulpi_write,
    215};
    216
    217/*-------------------------------------------------------------------------*/
    218
    219static u32 musb_default_fifo_offset(u8 epnum)
    220{
    221	return 0x20 + (epnum * 4);
    222}
    223
    224/* "flat" mapping: each endpoint has its own i/o address */
    225static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
    226{
    227}
    228
    229static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
    230{
    231	return 0x100 + (0x10 * epnum) + offset;
    232}
    233
    234/* "indexed" mapping: INDEX register controls register bank select */
    235static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
    236{
    237	musb_writeb(mbase, MUSB_INDEX, epnum);
    238}
    239
    240static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
    241{
    242	return 0x10 + offset;
    243}
    244
    245static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
    246{
    247	return 0x80 + (0x08 * epnum) + offset;
    248}
    249
    250static u8 musb_default_readb(void __iomem *addr, u32 offset)
    251{
    252	u8 data =  __raw_readb(addr + offset);
    253
    254	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
    255	return data;
    256}
    257
    258static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
    259{
    260	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
    261	__raw_writeb(data, addr + offset);
    262}
    263
    264static u16 musb_default_readw(void __iomem *addr, u32 offset)
    265{
    266	u16 data = __raw_readw(addr + offset);
    267
    268	trace_musb_readw(__builtin_return_address(0), addr, offset, data);
    269	return data;
    270}
    271
    272static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
    273{
    274	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
    275	__raw_writew(data, addr + offset);
    276}
    277
    278static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
    279{
    280	void __iomem *epio = qh->hw_ep->regs;
    281	u16 csr;
    282
    283	if (is_out)
    284		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
    285	else
    286		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
    287
    288	return csr;
    289}
    290
    291static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
    292				   struct urb *urb)
    293{
    294	u16 csr;
    295	u16 toggle;
    296
    297	toggle = usb_gettoggle(urb->dev, qh->epnum, is_out);
    298
    299	if (is_out)
    300		csr = toggle ? (MUSB_TXCSR_H_WR_DATATOGGLE
    301				| MUSB_TXCSR_H_DATATOGGLE)
    302				: MUSB_TXCSR_CLRDATATOG;
    303	else
    304		csr = toggle ? (MUSB_RXCSR_H_WR_DATATOGGLE
    305				| MUSB_RXCSR_H_DATATOGGLE) : 0;
    306
    307	return csr;
    308}
    309
    310/*
    311 * Load an endpoint's FIFO
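 *
 * The access width follows the source buffer's alignment: 32-bit writes
 * for 4-byte aligned data, 16-bit for 2-byte aligned, single bytes
 * otherwise; any trailing remainder is written as a word and/or byte.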
    312 */
    313static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
    314				    const u8 *src)
    315{
    316	struct musb *musb = hw_ep->musb;
    317	void __iomem *fifo = hw_ep->fifo;
    318
    319	if (unlikely(len == 0))
    320		return;
    321
    322	prefetch((u8 *)src);
    323
    324	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
    325			'T', hw_ep->epnum, fifo, len, src);
    326
    327	/* we can't assume unaligned reads work */
    328	if (likely((0x01 & (unsigned long) src) == 0)) {
    329		u16	index = 0;
    330
    331		/* best case is 32bit-aligned source address */
    332		if ((0x02 & (unsigned long) src) == 0) {
    333			if (len >= 4) {
    334				iowrite32_rep(fifo, src + index, len >> 2);
    335				index += len & ~0x03;
    336			}
    337			if (len & 0x02) {
    338				__raw_writew(*(u16 *)&src[index], fifo);
    339				index += 2;
    340			}
    341		} else {
    342			if (len >= 2) {
    343				iowrite16_rep(fifo, src + index, len >> 1);
    344				index += len & ~0x01;
    345			}
    346		}
    347		if (len & 0x01)
    348			__raw_writeb(src[index], fifo);
    349	} else  {
    350		/* byte aligned */
    351		iowrite8_rep(fifo, src, len);
    352	}
    353}
    354
    355/*
    356 * Unload an endpoint's FIFO
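 *
 * Mirror of musb_default_write_fifo(): the read width follows the
 * destination buffer's alignment.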
    357 */
    358static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
    359{
    360	struct musb *musb = hw_ep->musb;
    361	void __iomem *fifo = hw_ep->fifo;
    362
    363	if (unlikely(len == 0))
    364		return;
    365
    366	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
    367			'R', hw_ep->epnum, fifo, len, dst);
    368
    369	/* we can't assume unaligned writes work */
    370	if (likely((0x01 & (unsigned long) dst) == 0)) {
    371		u16	index = 0;
    372
    373		/* best case is 32bit-aligned destination address */
    374		if ((0x02 & (unsigned long) dst) == 0) {
    375			if (len >= 4) {
    376				ioread32_rep(fifo, dst, len >> 2);
    377				index = len & ~0x03;
    378			}
    379			if (len & 0x02) {
    380				*(u16 *)&dst[index] = __raw_readw(fifo);
    381				index += 2;
    382			}
    383		} else {
    384			if (len >= 2) {
    385				ioread16_rep(fifo, dst, len >> 1);
    386				index = len & ~0x01;
    387			}
    388		}
    389		if (len & 0x01)
    390			dst[index] = __raw_readb(fifo);
    391	} else  {
    392		/* byte aligned */
    393		ioread8_rep(fifo, dst, len);
    394	}
    395}
    396
    397/*
    398 * Old style IO functions
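 * (exported function pointers, typically filled in at init time; platform
 * glue can substitute its own accessors in place of the musb_default_*
 * helpers above)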
    399 */
    400u8 (*musb_readb)(void __iomem *addr, u32 offset);
    401EXPORT_SYMBOL_GPL(musb_readb);
    402
    403void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
    404EXPORT_SYMBOL_GPL(musb_writeb);
    405
    406u8 (*musb_clearb)(void __iomem *addr, u32 offset);
    407EXPORT_SYMBOL_GPL(musb_clearb);
    408
    409u16 (*musb_readw)(void __iomem *addr, u32 offset);
    410EXPORT_SYMBOL_GPL(musb_readw);
    411
    412void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
    413EXPORT_SYMBOL_GPL(musb_writew);
    414
    415u16 (*musb_clearw)(void __iomem *addr, u32 offset);
    416EXPORT_SYMBOL_GPL(musb_clearw);
    417
    418u32 musb_readl(void __iomem *addr, u32 offset)
    419{
    420	u32 data = __raw_readl(addr + offset);
    421
    422	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
    423	return data;
    424}
    425EXPORT_SYMBOL_GPL(musb_readl);
    426
    427void musb_writel(void __iomem *addr, u32 offset, u32 data)
    428{
    429	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
    430	__raw_writel(data, addr + offset);
    431}
    432EXPORT_SYMBOL_GPL(musb_writel);
    433
    434#ifndef CONFIG_MUSB_PIO_ONLY
    435struct dma_controller *
    436(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
    437EXPORT_SYMBOL(musb_dma_controller_create);
    438
    439void (*musb_dma_controller_destroy)(struct dma_controller *c);
    440EXPORT_SYMBOL(musb_dma_controller_destroy);
    441#endif
    442
    443/*
    444 * New style IO functions
    445 */
    446void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
    447{
    448	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
    449}
    450
    451void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
    452{
    453	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
    454}
    455
    456static u8 musb_read_devctl(struct musb *musb)
    457{
    458	return musb_readb(musb->mregs, MUSB_DEVCTL);
    459}
    460
    461/**
    462 * musb_set_host - set and initialize host mode
    463 * @musb: musb controller driver data
    464 *
     465 * At least some musb revisions need to enable the devctl session bit in
    466 * peripheral mode to switch to host mode. Initializes things to host
    467 * mode and sets A_IDLE. SoC glue needs to advance state further
    468 * based on phy provided VBUS state.
    469 *
    470 * Note that the SoC glue code may need to wait for musb to settle
    471 * on enable before calling this to avoid babble.
    472 */
    473int musb_set_host(struct musb *musb)
    474{
    475	int error = 0;
    476	u8 devctl;
    477
    478	if (!musb)
    479		return -EINVAL;
    480
    481	devctl = musb_read_devctl(musb);
    482	if (!(devctl & MUSB_DEVCTL_BDEVICE)) {
    483		trace_musb_state(musb, devctl, "Already in host mode");
    484		goto init_data;
    485	}
    486
    487	devctl |= MUSB_DEVCTL_SESSION;
    488	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
    489
    490	error = readx_poll_timeout(musb_read_devctl, musb, devctl,
    491				   !(devctl & MUSB_DEVCTL_BDEVICE), 5000,
    492				   1000000);
    493	if (error) {
    494		dev_err(musb->controller, "%s: could not set host: %02x\n",
    495			__func__, devctl);
    496
    497		return error;
    498	}
    499
    500	devctl = musb_read_devctl(musb);
    501	trace_musb_state(musb, devctl, "Host mode set");
    502
    503init_data:
    504	musb->is_active = 1;
    505	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
    506	MUSB_HST_MODE(musb);
    507
    508	return error;
    509}
    510EXPORT_SYMBOL_GPL(musb_set_host);
    511
    512/**
    513 * musb_set_peripheral - set and initialize peripheral mode
    514 * @musb: musb controller driver data
    515 *
    516 * Clears devctl session bit and initializes things for peripheral
    517 * mode and sets B_IDLE. SoC glue needs to advance state further
    518 * based on phy provided VBUS state.
    519 */
    520int musb_set_peripheral(struct musb *musb)
    521{
    522	int error = 0;
    523	u8 devctl;
    524
    525	if (!musb)
    526		return -EINVAL;
    527
    528	devctl = musb_read_devctl(musb);
    529	if (devctl & MUSB_DEVCTL_BDEVICE) {
    530		trace_musb_state(musb, devctl, "Already in peripheral mode");
    531		goto init_data;
    532	}
    533
    534	devctl &= ~MUSB_DEVCTL_SESSION;
    535	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
    536
    537	error = readx_poll_timeout(musb_read_devctl, musb, devctl,
    538				   devctl & MUSB_DEVCTL_BDEVICE, 5000,
    539				   1000000);
    540	if (error) {
    541		dev_err(musb->controller, "%s: could not set peripheral: %02x\n",
    542			__func__, devctl);
    543
    544		return error;
    545	}
    546
    547	devctl = musb_read_devctl(musb);
    548	trace_musb_state(musb, devctl, "Peripheral mode set");
    549
    550init_data:
    551	musb->is_active = 0;
    552	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
    553	MUSB_DEV_MODE(musb);
    554
    555	return error;
    556}
    557EXPORT_SYMBOL_GPL(musb_set_peripheral);
    558
    559/*-------------------------------------------------------------------------*/
    560
    561/* for high speed test mode; see USB 2.0 spec 7.1.20 */
    562static const u8 musb_test_packet[53] = {
    563	/* implicit SYNC then DATA0 to start */
    564
    565	/* JKJKJKJK x9 */
    566	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    567	/* JJKKJJKK x8 */
    568	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
    569	/* JJJJKKKK x8 */
    570	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
    571	/* JJJJJJJKKKKKKK x8 */
    572	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
    573	/* JJJJJJJK x8 */
    574	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
    575	/* JKKKKKKK x10, JK */
    576	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
    577
    578	/* implicit CRC16 then EOP to end */
    579};
    580
    581void musb_load_testpacket(struct musb *musb)
    582{
    583	void __iomem	*regs = musb->endpoints[0].regs;
    584
    585	musb_ep_select(musb->mregs, 0);
    586	musb_write_fifo(musb->control_ep,
    587			sizeof(musb_test_packet), musb_test_packet);
    588	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
    589}
    590
    591/*-------------------------------------------------------------------------*/
    592
    593/*
    594 * Handles OTG hnp timeouts, such as b_ase0_brst
    595 */
    596static void musb_otg_timer_func(struct timer_list *t)
    597{
    598	struct musb	*musb = from_timer(musb, t, otg_timer);
    599	unsigned long	flags;
    600
    601	spin_lock_irqsave(&musb->lock, flags);
    602	switch (musb->xceiv->otg->state) {
    603	case OTG_STATE_B_WAIT_ACON:
    604		musb_dbg(musb,
    605			"HNP: b_wait_acon timeout; back to b_peripheral");
    606		musb_g_disconnect(musb);
    607		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
    608		musb->is_active = 0;
    609		break;
    610	case OTG_STATE_A_SUSPEND:
    611	case OTG_STATE_A_WAIT_BCON:
    612		musb_dbg(musb, "HNP: %s timeout",
    613			usb_otg_state_string(musb->xceiv->otg->state));
    614		musb_platform_set_vbus(musb, 0);
    615		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
    616		break;
    617	default:
    618		musb_dbg(musb, "HNP: Unhandled mode %s",
    619			usb_otg_state_string(musb->xceiv->otg->state));
    620	}
    621	spin_unlock_irqrestore(&musb->lock, flags);
    622}
    623
    624/*
    625 * Stops the HNP transition. Caller must take care of locking.
    626 */
    627void musb_hnp_stop(struct musb *musb)
    628{
    629	struct usb_hcd	*hcd = musb->hcd;
    630	void __iomem	*mbase = musb->mregs;
    631	u8	reg;
    632
    633	musb_dbg(musb, "HNP: stop from %s",
    634			usb_otg_state_string(musb->xceiv->otg->state));
    635
    636	switch (musb->xceiv->otg->state) {
    637	case OTG_STATE_A_PERIPHERAL:
    638		musb_g_disconnect(musb);
    639		musb_dbg(musb, "HNP: back to %s",
    640			usb_otg_state_string(musb->xceiv->otg->state));
    641		break;
    642	case OTG_STATE_B_HOST:
    643		musb_dbg(musb, "HNP: Disabling HR");
    644		if (hcd)
    645			hcd->self.is_b_host = 0;
    646		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
    647		MUSB_DEV_MODE(musb);
    648		reg = musb_readb(mbase, MUSB_POWER);
    649		reg |= MUSB_POWER_SUSPENDM;
    650		musb_writeb(mbase, MUSB_POWER, reg);
    651		/* REVISIT: Start SESSION_REQUEST here? */
    652		break;
    653	default:
    654		musb_dbg(musb, "HNP: Stopping in unknown state %s",
    655			usb_otg_state_string(musb->xceiv->otg->state));
    656	}
    657
    658	/*
    659	 * When returning to A state after HNP, avoid hub_port_rebounce(),
     660	 * which causes occasional OPT A "Did not receive reset after connect"
    661	 * errors.
    662	 */
    663	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
    664}
    665
    666static void musb_recover_from_babble(struct musb *musb);
    667
    668static void musb_handle_intr_resume(struct musb *musb, u8 devctl)
    669{
    670	musb_dbg(musb, "RESUME (%s)",
    671			usb_otg_state_string(musb->xceiv->otg->state));
    672
    673	if (devctl & MUSB_DEVCTL_HM) {
    674		switch (musb->xceiv->otg->state) {
    675		case OTG_STATE_A_SUSPEND:
    676			/* remote wakeup? */
    677			musb->port1_status |=
    678					(USB_PORT_STAT_C_SUSPEND << 16)
    679					| MUSB_PORT_STAT_RESUME;
    680			musb->rh_timer = jiffies
    681				+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
    682			musb->xceiv->otg->state = OTG_STATE_A_HOST;
    683			musb->is_active = 1;
    684			musb_host_resume_root_hub(musb);
    685			schedule_delayed_work(&musb->finish_resume_work,
    686				msecs_to_jiffies(USB_RESUME_TIMEOUT));
    687			break;
    688		case OTG_STATE_B_WAIT_ACON:
    689			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
    690			musb->is_active = 1;
    691			MUSB_DEV_MODE(musb);
    692			break;
    693		default:
    694			WARNING("bogus %s RESUME (%s)\n",
    695				"host",
    696				usb_otg_state_string(musb->xceiv->otg->state));
    697		}
    698	} else {
    699		switch (musb->xceiv->otg->state) {
    700		case OTG_STATE_A_SUSPEND:
    701			/* possibly DISCONNECT is upcoming */
    702			musb->xceiv->otg->state = OTG_STATE_A_HOST;
    703			musb_host_resume_root_hub(musb);
    704			break;
    705		case OTG_STATE_B_WAIT_ACON:
    706		case OTG_STATE_B_PERIPHERAL:
    707			/* disconnect while suspended?  we may
    708			 * not get a disconnect irq...
    709			 */
    710			if ((devctl & MUSB_DEVCTL_VBUS)
    711					!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
    712					) {
    713				musb->int_usb |= MUSB_INTR_DISCONNECT;
    714				musb->int_usb &= ~MUSB_INTR_SUSPEND;
    715				break;
    716			}
    717			musb_g_resume(musb);
    718			break;
    719		case OTG_STATE_B_IDLE:
    720			musb->int_usb &= ~MUSB_INTR_SUSPEND;
    721			break;
    722		default:
    723			WARNING("bogus %s RESUME (%s)\n",
    724				"peripheral",
    725				usb_otg_state_string(musb->xceiv->otg->state));
    726		}
    727	}
    728}
    729
    730/* return IRQ_HANDLED to tell the caller to return immediately */
    731static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl)
    732{
    733	void __iomem *mbase = musb->mregs;
    734
    735	if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
    736			&& (devctl & MUSB_DEVCTL_BDEVICE)) {
    737		musb_dbg(musb, "SessReq while on B state");
    738		return IRQ_HANDLED;
    739	}
    740
    741	musb_dbg(musb, "SESSION_REQUEST (%s)",
    742		usb_otg_state_string(musb->xceiv->otg->state));
    743
    744	/* IRQ arrives from ID pin sense or (later, if VBUS power
    745	 * is removed) SRP.  responses are time critical:
    746	 *  - turn on VBUS (with silicon-specific mechanism)
    747	 *  - go through A_WAIT_VRISE
    748	 *  - ... to A_WAIT_BCON.
    749	 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
    750	 */
    751	musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
    752	musb->ep0_stage = MUSB_EP0_START;
    753	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
    754	MUSB_HST_MODE(musb);
    755	musb_platform_set_vbus(musb, 1);
    756
    757	return IRQ_NONE;
    758}
    759
    760static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl)
    761{
    762	int	ignore = 0;
    763
     764	/* During connection as an A-Device, we may see short
     765	 * current spikes causing a voltage drop, because of cable
    766	 * and peripheral capacitance combined with vbus draw.
    767	 * (So: less common with truly self-powered devices, where
    768	 * vbus doesn't act like a power supply.)
    769	 *
    770	 * Such spikes are short; usually less than ~500 usec, max
    771	 * of ~2 msec.  That is, they're not sustained overcurrent
    772	 * errors, though they're reported using VBUSERROR irqs.
    773	 *
    774	 * Workarounds:  (a) hardware: use self powered devices.
    775	 * (b) software:  ignore non-repeated VBUS errors.
    776	 *
    777	 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
    778	 * make trouble here, keeping VBUS < 4.4V ?
    779	 */
    780	switch (musb->xceiv->otg->state) {
    781	case OTG_STATE_A_HOST:
    782		/* recovery is dicey once we've gotten past the
    783		 * initial stages of enumeration, but if VBUS
    784		 * stayed ok at the other end of the link, and
    785		 * another reset is due (at least for high speed,
    786		 * to redo the chirp etc), it might work OK...
    787		 */
    788	case OTG_STATE_A_WAIT_BCON:
    789	case OTG_STATE_A_WAIT_VRISE:
    790		if (musb->vbuserr_retry) {
    791			void __iomem *mbase = musb->mregs;
    792
    793			musb->vbuserr_retry--;
    794			ignore = 1;
    795			devctl |= MUSB_DEVCTL_SESSION;
    796			musb_writeb(mbase, MUSB_DEVCTL, devctl);
    797		} else {
    798			musb->port1_status |=
    799				  USB_PORT_STAT_OVERCURRENT
    800				| (USB_PORT_STAT_C_OVERCURRENT << 16);
    801		}
    802		break;
    803	default:
    804		break;
    805	}
    806
    807	dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
    808			"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
    809			usb_otg_state_string(musb->xceiv->otg->state),
    810			devctl,
    811			({ char *s;
    812			switch (devctl & MUSB_DEVCTL_VBUS) {
    813			case 0 << MUSB_DEVCTL_VBUS_SHIFT:
    814				s = "<SessEnd"; break;
    815			case 1 << MUSB_DEVCTL_VBUS_SHIFT:
    816				s = "<AValid"; break;
    817			case 2 << MUSB_DEVCTL_VBUS_SHIFT:
    818				s = "<VBusValid"; break;
    819			/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
    820			default:
    821				s = "VALID"; break;
    822			} s; }),
    823			VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
    824			musb->port1_status);
    825
    826	/* go through A_WAIT_VFALL then start a new session */
    827	if (!ignore)
    828		musb_platform_set_vbus(musb, 0);
    829}
    830
    831static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
    832{
    833	musb_dbg(musb, "SUSPEND (%s) devctl %02x",
    834		usb_otg_state_string(musb->xceiv->otg->state), devctl);
    835
    836	switch (musb->xceiv->otg->state) {
    837	case OTG_STATE_A_PERIPHERAL:
    838		/* We also come here if the cable is removed, since
    839		 * this silicon doesn't report ID-no-longer-grounded.
    840		 *
    841		 * We depend on T(a_wait_bcon) to shut us down, and
    842		 * hope users don't do anything dicey during this
    843		 * undesired detour through A_WAIT_BCON.
    844		 */
    845		musb_hnp_stop(musb);
    846		musb_host_resume_root_hub(musb);
    847		musb_root_disconnect(musb);
    848		musb_platform_try_idle(musb, jiffies
    849				+ msecs_to_jiffies(musb->a_wait_bcon
    850					? : OTG_TIME_A_WAIT_BCON));
    851
    852		break;
    853	case OTG_STATE_B_IDLE:
    854		if (!musb->is_active)
    855			break;
    856		fallthrough;
    857	case OTG_STATE_B_PERIPHERAL:
    858		musb_g_suspend(musb);
    859		musb->is_active = musb->g.b_hnp_enable;
    860		if (musb->is_active) {
    861			musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
    862			musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
    863			mod_timer(&musb->otg_timer, jiffies
    864				+ msecs_to_jiffies(
    865						OTG_TIME_B_ASE0_BRST));
    866		}
    867		break;
    868	case OTG_STATE_A_WAIT_BCON:
    869		if (musb->a_wait_bcon != 0)
    870			musb_platform_try_idle(musb, jiffies
    871				+ msecs_to_jiffies(musb->a_wait_bcon));
    872		break;
    873	case OTG_STATE_A_HOST:
    874		musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
    875		musb->is_active = musb->hcd->self.b_hnp_enable;
    876		break;
    877	case OTG_STATE_B_HOST:
    878		/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
    879		musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
    880		break;
    881	default:
    882		/* "should not happen" */
    883		musb->is_active = 0;
    884		break;
    885	}
    886}
    887
    888static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb)
    889{
    890	struct usb_hcd *hcd = musb->hcd;
    891
    892	musb->is_active = 1;
    893	musb->ep0_stage = MUSB_EP0_START;
    894
    895	musb->intrtxe = musb->epmask;
    896	musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
    897	musb->intrrxe = musb->epmask & 0xfffe;
    898	musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
    899	musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
    900	musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
    901				|USB_PORT_STAT_HIGH_SPEED
    902				|USB_PORT_STAT_ENABLE
    903				);
    904	musb->port1_status |= USB_PORT_STAT_CONNECTION
    905				|(USB_PORT_STAT_C_CONNECTION << 16);
    906
    907	/* high vs full speed is just a guess until after reset */
    908	if (devctl & MUSB_DEVCTL_LSDEV)
    909		musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
    910
    911	/* indicate new connection to OTG machine */
    912	switch (musb->xceiv->otg->state) {
    913	case OTG_STATE_B_PERIPHERAL:
    914		if (int_usb & MUSB_INTR_SUSPEND) {
    915			musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
    916			int_usb &= ~MUSB_INTR_SUSPEND;
    917			goto b_host;
    918		} else
    919			musb_dbg(musb, "CONNECT as b_peripheral???");
    920		break;
    921	case OTG_STATE_B_WAIT_ACON:
    922		musb_dbg(musb, "HNP: CONNECT, now b_host");
    923b_host:
    924		musb->xceiv->otg->state = OTG_STATE_B_HOST;
    925		if (musb->hcd)
    926			musb->hcd->self.is_b_host = 1;
    927		del_timer(&musb->otg_timer);
    928		break;
    929	default:
    930		if ((devctl & MUSB_DEVCTL_VBUS)
    931				== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
    932			musb->xceiv->otg->state = OTG_STATE_A_HOST;
    933			if (hcd)
    934				hcd->self.is_b_host = 0;
    935		}
    936		break;
    937	}
    938
    939	musb_host_poke_root_hub(musb);
    940
    941	musb_dbg(musb, "CONNECT (%s) devctl %02x",
    942			usb_otg_state_string(musb->xceiv->otg->state), devctl);
    943}
    944
    945static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
    946{
    947	musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
    948			usb_otg_state_string(musb->xceiv->otg->state),
    949			MUSB_MODE(musb), devctl);
    950
    951	switch (musb->xceiv->otg->state) {
    952	case OTG_STATE_A_HOST:
    953	case OTG_STATE_A_SUSPEND:
    954		musb_host_resume_root_hub(musb);
    955		musb_root_disconnect(musb);
    956		if (musb->a_wait_bcon != 0)
    957			musb_platform_try_idle(musb, jiffies
    958				+ msecs_to_jiffies(musb->a_wait_bcon));
    959		break;
    960	case OTG_STATE_B_HOST:
    961		/* REVISIT this behaves for "real disconnect"
    962		 * cases; make sure the other transitions from
     963		 * B_HOST act right too.  The B_HOST code
    964		 * in hnp_stop() is currently not used...
    965		 */
    966		musb_root_disconnect(musb);
    967		if (musb->hcd)
    968			musb->hcd->self.is_b_host = 0;
    969		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
    970		MUSB_DEV_MODE(musb);
    971		musb_g_disconnect(musb);
    972		break;
    973	case OTG_STATE_A_PERIPHERAL:
    974		musb_hnp_stop(musb);
    975		musb_root_disconnect(musb);
    976		fallthrough;
    977	case OTG_STATE_B_WAIT_ACON:
    978	case OTG_STATE_B_PERIPHERAL:
    979	case OTG_STATE_B_IDLE:
    980		musb_g_disconnect(musb);
    981		break;
    982	default:
    983		WARNING("unhandled DISCONNECT transition (%s)\n",
    984			usb_otg_state_string(musb->xceiv->otg->state));
    985		break;
    986	}
    987}
    988
    989/*
    990 * mentor saves a bit: bus reset and babble share the same irq.
    991 * only host sees babble; only peripheral sees bus reset.
    992 */
    993static void musb_handle_intr_reset(struct musb *musb)
    994{
    995	if (is_host_active(musb)) {
    996		/*
     997		 * When BABBLE happens, what we can do depends on which
     998		 * platform MUSB is running on, because some platforms
     999		 * implement proprietary means of 'recovering' from
   1000		 * Babble conditions. One such platform is AM335x. In
   1001		 * most cases, however, the only thing we can do is
   1002		 * drop the session.
   1003		 */
   1004		dev_err(musb->controller, "Babble\n");
   1005		musb_recover_from_babble(musb);
   1006	} else {
   1007		musb_dbg(musb, "BUS RESET as %s",
   1008			usb_otg_state_string(musb->xceiv->otg->state));
   1009		switch (musb->xceiv->otg->state) {
   1010		case OTG_STATE_A_SUSPEND:
   1011			musb_g_reset(musb);
   1012			fallthrough;
   1013		case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
   1014			/* never use invalid T(a_wait_bcon) */
   1015			musb_dbg(musb, "HNP: in %s, %d msec timeout",
   1016				usb_otg_state_string(musb->xceiv->otg->state),
   1017				TA_WAIT_BCON(musb));
   1018			mod_timer(&musb->otg_timer, jiffies
   1019				+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
   1020			break;
   1021		case OTG_STATE_A_PERIPHERAL:
   1022			del_timer(&musb->otg_timer);
   1023			musb_g_reset(musb);
   1024			break;
   1025		case OTG_STATE_B_WAIT_ACON:
   1026			musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
   1027				usb_otg_state_string(musb->xceiv->otg->state));
   1028			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
   1029			musb_g_reset(musb);
   1030			break;
   1031		case OTG_STATE_B_IDLE:
   1032			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
   1033			fallthrough;
   1034		case OTG_STATE_B_PERIPHERAL:
   1035			musb_g_reset(musb);
   1036			break;
   1037		default:
   1038			musb_dbg(musb, "Unhandled BUS RESET as %s",
   1039				usb_otg_state_string(musb->xceiv->otg->state));
   1040		}
   1041	}
   1042}
   1043
   1044/*
   1045 * Interrupt Service Routine to record USB "global" interrupts.
   1046 * Since these do not happen often and signify things of
   1047 * paramount importance, it seems OK to check them individually;
   1048 * the order of the tests is specified in the manual
   1049 *
   1050 * @param musb instance pointer
   1051 * @param int_usb register contents
   1052 * @param devctl
   1053 */
   1054
   1055static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
   1056				u8 devctl)
   1057{
   1058	irqreturn_t handled = IRQ_NONE;
   1059
   1060	musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
   1061
   1062	/* in host mode, the peripheral may issue remote wakeup.
   1063	 * in peripheral mode, the host may resume the link.
   1064	 * spurious RESUME irqs happen too, paired with SUSPEND.
   1065	 */
   1066	if (int_usb & MUSB_INTR_RESUME) {
   1067		musb_handle_intr_resume(musb, devctl);
   1068		handled = IRQ_HANDLED;
   1069	}
   1070
   1071	/* see manual for the order of the tests */
   1072	if (int_usb & MUSB_INTR_SESSREQ) {
   1073		if (musb_handle_intr_sessreq(musb, devctl))
   1074			return IRQ_HANDLED;
   1075		handled = IRQ_HANDLED;
   1076	}
   1077
   1078	if (int_usb & MUSB_INTR_VBUSERROR) {
   1079		musb_handle_intr_vbuserr(musb, devctl);
   1080		handled = IRQ_HANDLED;
   1081	}
   1082
   1083	if (int_usb & MUSB_INTR_SUSPEND) {
   1084		musb_handle_intr_suspend(musb, devctl);
   1085		handled = IRQ_HANDLED;
   1086	}
   1087
   1088	if (int_usb & MUSB_INTR_CONNECT) {
   1089		musb_handle_intr_connect(musb, devctl, int_usb);
   1090		handled = IRQ_HANDLED;
   1091	}
   1092
   1093	if (int_usb & MUSB_INTR_DISCONNECT) {
   1094		musb_handle_intr_disconnect(musb, devctl);
   1095		handled = IRQ_HANDLED;
   1096	}
   1097
   1098	if (int_usb & MUSB_INTR_RESET) {
   1099		musb_handle_intr_reset(musb);
   1100		handled = IRQ_HANDLED;
   1101	}
   1102
   1103#if 0
   1104/* REVISIT ... this would be for multiplexing periodic endpoints, or
   1105 * supporting transfer phasing to prevent exceeding ISO bandwidth
   1106 * limits of a given frame or microframe.
   1107 *
   1108 * It's not needed for peripheral side, which dedicates endpoints;
   1109 * though it _might_ use SOF irqs for other purposes.
   1110 *
   1111 * And it's not currently needed for host side, which also dedicates
   1112 * endpoints, relies on TX/RX interval registers, and isn't claimed
   1113 * to support ISO transfers yet.
   1114 */
   1115	if (int_usb & MUSB_INTR_SOF) {
   1116		void __iomem *mbase = musb->mregs;
   1117		struct musb_hw_ep	*ep;
   1118		u8 epnum;
   1119		u16 frame;
   1120
   1121		dev_dbg(musb->controller, "START_OF_FRAME\n");
   1122		handled = IRQ_HANDLED;
   1123
   1124		/* start any periodic Tx transfers waiting for current frame */
   1125		frame = musb_readw(mbase, MUSB_FRAME);
   1126		ep = musb->endpoints;
   1127		for (epnum = 1; (epnum < musb->nr_endpoints)
   1128					&& (musb->epmask >= (1 << epnum));
   1129				epnum++, ep++) {
   1130			/*
   1131			 * FIXME handle framecounter wraps (12 bits)
   1132			 * eliminate duplicated StartUrb logic
   1133			 */
   1134			if (ep->dwWaitFrame >= frame) {
   1135				ep->dwWaitFrame = 0;
   1136				pr_debug("SOF --> periodic TX%s on %d\n",
   1137					ep->tx_channel ? " DMA" : "",
   1138					epnum);
   1139				if (!ep->tx_channel)
   1140					musb_h_tx_start(musb, epnum);
   1141				else
   1142					cppi_hostdma_start(musb, epnum);
   1143			}
   1144		}		/* end of for loop */
   1145	}
   1146#endif
   1147
   1148	schedule_delayed_work(&musb->irq_work, 0);
   1149
   1150	return handled;
   1151}
   1152
   1153/*-------------------------------------------------------------------------*/
   1154
   1155static void musb_disable_interrupts(struct musb *musb)
   1156{
   1157	void __iomem	*mbase = musb->mregs;
   1158
   1159	/* disable interrupts */
   1160	musb_writeb(mbase, MUSB_INTRUSBE, 0);
   1161	musb->intrtxe = 0;
   1162	musb_writew(mbase, MUSB_INTRTXE, 0);
   1163	musb->intrrxe = 0;
   1164	musb_writew(mbase, MUSB_INTRRXE, 0);
   1165
   1166	/*  flush pending interrupts */
   1167	musb_clearb(mbase, MUSB_INTRUSB);
   1168	musb_clearw(mbase, MUSB_INTRTX);
   1169	musb_clearw(mbase, MUSB_INTRRX);
   1170}
   1171
   1172static void musb_enable_interrupts(struct musb *musb)
   1173{
   1174	void __iomem    *regs = musb->mregs;
   1175
   1176	/*  Set INT enable registers, enable interrupts */
   1177	musb->intrtxe = musb->epmask;
   1178	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
   1179	musb->intrrxe = musb->epmask & 0xfffe;
   1180	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
   1181	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
   1182
   1183}
   1184
   1185/*
   1186 * Program the HDRC to start (enable interrupts, dma, etc.).
   1187 */
   1188void musb_start(struct musb *musb)
   1189{
   1190	void __iomem    *regs = musb->mregs;
   1191	u8              devctl = musb_readb(regs, MUSB_DEVCTL);
   1192	u8		power;
   1193
   1194	musb_dbg(musb, "<== devctl %02x", devctl);
   1195
   1196	musb_enable_interrupts(musb);
   1197	musb_writeb(regs, MUSB_TESTMODE, 0);
   1198
   1199	power = MUSB_POWER_ISOUPDATE;
   1200	/*
    1201	 * Treat UNKNOWN as an unspecified maximum speed, in which case
    1202	 * we default to high-speed.
   1203	 */
   1204	if (musb->config->maximum_speed == USB_SPEED_HIGH ||
   1205			musb->config->maximum_speed == USB_SPEED_UNKNOWN)
   1206		power |= MUSB_POWER_HSENAB;
   1207	musb_writeb(regs, MUSB_POWER, power);
   1208
   1209	musb->is_active = 0;
   1210	devctl = musb_readb(regs, MUSB_DEVCTL);
   1211	devctl &= ~MUSB_DEVCTL_SESSION;
   1212
   1213	/* session started after:
   1214	 * (a) ID-grounded irq, host mode;
   1215	 * (b) vbus present/connect IRQ, peripheral mode;
   1216	 * (c) peripheral initiates, using SRP
   1217	 */
   1218	if (musb->port_mode != MUSB_HOST &&
   1219			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
   1220			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
   1221		musb->is_active = 1;
   1222	} else {
   1223		devctl |= MUSB_DEVCTL_SESSION;
   1224	}
   1225
   1226	musb_platform_enable(musb);
   1227	musb_writeb(regs, MUSB_DEVCTL, devctl);
   1228}
   1229
   1230/*
   1231 * Make the HDRC stop (disable interrupts, etc.);
   1232 * reversible by musb_start
   1233 * called on gadget driver unregister
   1234 * with controller locked, irqs blocked
   1235 * acts as a NOP unless some role activated the hardware
   1236 */
   1237void musb_stop(struct musb *musb)
   1238{
   1239	/* stop IRQs, timers, ... */
   1240	musb_platform_disable(musb);
   1241	musb_disable_interrupts(musb);
   1242	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
   1243
   1244	/* FIXME
   1245	 *  - mark host and/or peripheral drivers unusable/inactive
   1246	 *  - disable DMA (and enable it in HdrcStart)
   1247	 *  - make sure we can musb_start() after musb_stop(); with
   1248	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
   1249	 *  - ...
   1250	 */
   1251	musb_platform_try_idle(musb, 0);
   1252}
   1253
   1254/*-------------------------------------------------------------------------*/
   1255
   1256/*
   1257 * The silicon either has hard-wired endpoint configurations, or else
   1258 * "dynamic fifo" sizing.  The driver has support for both, though at this
   1259 * writing only the dynamic sizing is very well tested.   Since we switched
   1260 * away from compile-time hardware parameters, we can no longer rely on
   1261 * dead code elimination to leave only the relevant one in the object file.
   1262 *
   1263 * We don't currently use dynamic fifo setup capability to do anything
   1264 * more than selecting one of a bunch of predefined configurations.
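 *
 * Note that ep0 always gets a dedicated 64-byte shared FIFO at offset 0
 * (ep0_cfg, applied by ep_config_from_table() before these tables).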
   1265 */
   1266static ushort fifo_mode;
   1267
   1268/* "modprobe ... fifo_mode=1" etc */
   1269module_param(fifo_mode, ushort, 0);
   1270MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
   1271
   1272/*
   1273 * tables defining fifo_mode values.  define more if you like.
   1274 * for host side, make sure both halves of ep1 are set up.
   1275 */
   1276
   1277/* mode 0 - fits in 2KB */
   1278static struct musb_fifo_cfg mode_0_cfg[] = {
   1279{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
   1280{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
   1281{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
   1282{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
   1283{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
   1284};
   1285
   1286/* mode 1 - fits in 4KB */
   1287static struct musb_fifo_cfg mode_1_cfg[] = {
   1288{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
   1289{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
   1290{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
   1291{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
   1292{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
   1293};
   1294
   1295/* mode 2 - fits in 4KB */
   1296static struct musb_fifo_cfg mode_2_cfg[] = {
   1297{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
   1298{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
   1299{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
   1300{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
   1301{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
   1302{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
   1303};
   1304
   1305/* mode 3 - fits in 4KB */
   1306static struct musb_fifo_cfg mode_3_cfg[] = {
   1307{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
   1308{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
   1309{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
   1310{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
   1311{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
   1312{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
   1313};
   1314
   1315/* mode 4 - fits in 16KB */
   1316static struct musb_fifo_cfg mode_4_cfg[] = {
   1317{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
   1318{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
   1319{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
   1320{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
   1321{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
   1322{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
   1323{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
   1324{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
   1325{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
   1326{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
   1327{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
   1328{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
   1329{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
   1330{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
   1331{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
   1332{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
   1333{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
   1334{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
   1335{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
   1336{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
   1337{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
   1338{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
   1339{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
   1340{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
   1341{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
   1342{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
   1343{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
   1344};
   1345
   1346/* mode 5 - fits in 8KB */
   1347static struct musb_fifo_cfg mode_5_cfg[] = {
   1348{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
   1349{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
   1350{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
   1351{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
   1352{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
   1353{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
   1354{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
   1355{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
   1356{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
   1357{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
   1358{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
   1359{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
   1360{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
   1361{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
   1362{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
   1363{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
   1364{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
   1365{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
   1366{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
   1367{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
   1368{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
   1369{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
   1370{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
   1371{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
   1372{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
   1373{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
   1374{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
   1375};
   1376
   1377/*
   1378 * configure a fifo; for non-shared endpoints, this may be called
   1379 * once for a tx fifo and once for an rx fifo.
   1380 *
   1381 * returns negative errno or offset for next fifo.
   1382 */
   1383static int
   1384fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
   1385		const struct musb_fifo_cfg *cfg, u16 offset)
   1386{
   1387	void __iomem	*mbase = musb->mregs;
   1388	int	size = 0;
   1389	u16	maxpacket = cfg->maxpacket;
   1390	u16	c_off = offset >> 3;
   1391	u8	c_size;
   1392
   1393	/* expect hw_ep has already been zero-initialized */
   1394
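	/* Round maxpacket up to a power of two (minimum 8 bytes).  The
	 * FIFOSZ register encodes log2(size) - 3, and FIFOADD is in
	 * 8-byte units (hence c_off = offset >> 3 above).
	 */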
   1395	size = ffs(max(maxpacket, (u16) 8)) - 1;
   1396	maxpacket = 1 << size;
   1397
   1398	c_size = size - 3;
   1399	if (cfg->mode == BUF_DOUBLE) {
   1400		if ((offset + (maxpacket << 1)) >
   1401				(1 << (musb->config->ram_bits + 2)))
   1402			return -EMSGSIZE;
   1403		c_size |= MUSB_FIFOSZ_DPB;
   1404	} else {
   1405		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
   1406			return -EMSGSIZE;
   1407	}
   1408
   1409	/* configure the FIFO */
   1410	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
   1411
   1412	/* EP0 reserved endpoint for control, bidirectional;
   1413	 * EP1 reserved for bulk, two unidirectional halves.
   1414	 */
   1415	if (hw_ep->epnum == 1)
   1416		musb->bulk_ep = hw_ep;
   1417	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
   1418	switch (cfg->style) {
   1419	case FIFO_TX:
   1420		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
   1421		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
   1422		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
   1423		hw_ep->max_packet_sz_tx = maxpacket;
   1424		break;
   1425	case FIFO_RX:
   1426		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
   1427		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
   1428		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
   1429		hw_ep->max_packet_sz_rx = maxpacket;
   1430		break;
   1431	case FIFO_RXTX:
   1432		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
   1433		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
   1434		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
   1435		hw_ep->max_packet_sz_rx = maxpacket;
   1436
   1437		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
   1438		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
   1439		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
   1440		hw_ep->max_packet_sz_tx = maxpacket;
   1441
   1442		hw_ep->is_shared_fifo = true;
   1443		break;
   1444	}
   1445
   1446	/* NOTE rx and tx endpoint irqs aren't managed separately,
   1447	 * which happens to be ok
   1448	 */
   1449	musb->epmask |= (1 << hw_ep->epnum);
   1450
   1451	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
   1452}
   1453
   1454static struct musb_fifo_cfg ep0_cfg = {
   1455	.style = FIFO_RXTX, .maxpacket = 64,
   1456};
   1457
   1458static int ep_config_from_table(struct musb *musb)
   1459{
   1460	const struct musb_fifo_cfg	*cfg;
   1461	unsigned		i, n;
   1462	int			offset;
   1463	struct musb_hw_ep	*hw_ep = musb->endpoints;
   1464
   1465	if (musb->config->fifo_cfg) {
   1466		cfg = musb->config->fifo_cfg;
   1467		n = musb->config->fifo_cfg_size;
   1468		goto done;
   1469	}
   1470
   1471	switch (fifo_mode) {
   1472	default:
   1473		fifo_mode = 0;
   1474		fallthrough;
   1475	case 0:
   1476		cfg = mode_0_cfg;
   1477		n = ARRAY_SIZE(mode_0_cfg);
   1478		break;
   1479	case 1:
   1480		cfg = mode_1_cfg;
   1481		n = ARRAY_SIZE(mode_1_cfg);
   1482		break;
   1483	case 2:
   1484		cfg = mode_2_cfg;
   1485		n = ARRAY_SIZE(mode_2_cfg);
   1486		break;
   1487	case 3:
   1488		cfg = mode_3_cfg;
   1489		n = ARRAY_SIZE(mode_3_cfg);
   1490		break;
   1491	case 4:
   1492		cfg = mode_4_cfg;
   1493		n = ARRAY_SIZE(mode_4_cfg);
   1494		break;
   1495	case 5:
   1496		cfg = mode_5_cfg;
   1497		n = ARRAY_SIZE(mode_5_cfg);
   1498		break;
   1499	}
   1500
   1501	pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
   1502
   1503
   1504done:
   1505	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
   1506	/* assert(offset > 0) */
   1507
   1508	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
   1509	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
   1510	 */
   1511
   1512	for (i = 0; i < n; i++) {
   1513		u8	epn = cfg->hw_ep_num;
   1514
   1515		if (epn >= musb->config->num_eps) {
   1516			pr_debug("%s: invalid ep %d\n",
   1517					musb_driver_name, epn);
   1518			return -EINVAL;
   1519		}
   1520		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
   1521		if (offset < 0) {
   1522			pr_debug("%s: mem overrun, ep %d\n",
   1523					musb_driver_name, epn);
   1524			return offset;
   1525		}
   1526		epn++;
   1527		musb->nr_endpoints = max(epn, musb->nr_endpoints);
   1528	}
   1529
   1530	pr_debug("%s: %d/%d max ep, %d/%d memory\n",
   1531			musb_driver_name,
   1532			n + 1, musb->config->num_eps * 2 - 1,
   1533			offset, (1 << (musb->config->ram_bits + 2)));
   1534
   1535	if (!musb->bulk_ep) {
   1536		pr_debug("%s: missing bulk\n", musb_driver_name);
   1537		return -EINVAL;
   1538	}
   1539
   1540	return 0;
   1541}
   1542
   1543
   1544/*
   1545 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
   1546 * @param musb the controller
   1547 */
   1548static int ep_config_from_hw(struct musb *musb)
   1549{
   1550	u8 epnum = 0;
   1551	struct musb_hw_ep *hw_ep;
   1552	void __iomem *mbase = musb->mregs;
   1553	int ret = 0;
   1554
   1555	musb_dbg(musb, "<== static silicon ep config");
   1556
   1557	/* FIXME pick up ep0 maxpacket size */
   1558
   1559	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
   1560		musb_ep_select(mbase, epnum);
   1561		hw_ep = musb->endpoints + epnum;
   1562
   1563		ret = musb_read_fifosize(musb, hw_ep, epnum);
   1564		if (ret < 0)
   1565			break;
   1566
   1567		/* FIXME set up hw_ep->{rx,tx}_double_buffered */
   1568
   1569		/* pick an RX/TX endpoint for bulk */
   1570		if (hw_ep->max_packet_sz_tx < 512
   1571				|| hw_ep->max_packet_sz_rx < 512)
   1572			continue;
   1573
   1574		/* REVISIT:  this algorithm is lazy, we should at least
   1575		 * try to pick a double buffered endpoint.
   1576		 */
   1577		if (musb->bulk_ep)
   1578			continue;
   1579		musb->bulk_ep = hw_ep;
   1580	}
   1581
   1582	if (!musb->bulk_ep) {
   1583		pr_debug("%s: missing bulk\n", musb_driver_name);
   1584		return -EINVAL;
   1585	}
   1586
   1587	return 0;
   1588}
   1589
   1590enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
   1591
   1592/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
   1593 * configure endpoints, or take their config from silicon
   1594 */
   1595static int musb_core_init(u16 musb_type, struct musb *musb)
   1596{
   1597	u8 reg;
   1598	char *type;
   1599	char aInfo[90];
   1600	void __iomem	*mbase = musb->mregs;
   1601	int		status = 0;
   1602	int		i;
   1603
   1604	/* log core options (read using indexed model) */
   1605	reg = musb_read_configdata(mbase);
   1606
   1607	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
   1608	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
   1609		strcat(aInfo, ", dyn FIFOs");
   1610		musb->dyn_fifo = true;
   1611	}
   1612	if (reg & MUSB_CONFIGDATA_MPRXE) {
   1613		strcat(aInfo, ", bulk combine");
   1614		musb->bulk_combine = true;
   1615	}
   1616	if (reg & MUSB_CONFIGDATA_MPTXE) {
   1617		strcat(aInfo, ", bulk split");
   1618		musb->bulk_split = true;
   1619	}
   1620	if (reg & MUSB_CONFIGDATA_HBRXE) {
   1621		strcat(aInfo, ", HB-ISO Rx");
   1622		musb->hb_iso_rx = true;
   1623	}
   1624	if (reg & MUSB_CONFIGDATA_HBTXE) {
   1625		strcat(aInfo, ", HB-ISO Tx");
   1626		musb->hb_iso_tx = true;
   1627	}
   1628	if (reg & MUSB_CONFIGDATA_SOFTCONE)
   1629		strcat(aInfo, ", SoftConn");
   1630
   1631	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
   1632
   1633	if (MUSB_CONTROLLER_MHDRC == musb_type) {
   1634		musb->is_multipoint = 1;
   1635		type = "M";
   1636	} else {
   1637		musb->is_multipoint = 0;
   1638		type = "";
   1639		if (IS_ENABLED(CONFIG_USB) &&
   1640		    !IS_ENABLED(CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB)) {
   1641			pr_err("%s: kernel must disable external hubs, please fix the configuration\n",
   1642			       musb_driver_name);
   1643		}
   1644	}
   1645
   1646	/* log release info */
   1647	musb->hwvers = musb_readw(mbase, MUSB_HWVERS);
   1648	pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
   1649		 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
   1650		 MUSB_HWVERS_MINOR(musb->hwvers),
   1651		 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
   1652
   1653	/* configure ep0 */
   1654	musb_configure_ep0(musb);
   1655
   1656	/* discover endpoint configuration */
   1657	musb->nr_endpoints = 1;
   1658	musb->epmask = 1;
   1659
   1660	if (musb->dyn_fifo)
   1661		status = ep_config_from_table(musb);
   1662	else
   1663		status = ep_config_from_hw(musb);
   1664
   1665	if (status < 0)
   1666		return status;
   1667
   1668	/* finish init, and print endpoint config */
   1669	for (i = 0; i < musb->nr_endpoints; i++) {
   1670		struct musb_hw_ep	*hw_ep = musb->endpoints + i;
   1671
   1672		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
   1673#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
   1674		if (musb->ops->quirks & MUSB_IN_TUSB) {
   1675			hw_ep->fifo_async = musb->async + 0x400 +
   1676				musb->io.fifo_offset(i);
   1677			hw_ep->fifo_sync = musb->sync + 0x400 +
   1678				musb->io.fifo_offset(i);
   1679			hw_ep->fifo_sync_va =
   1680				musb->sync_va + 0x400 + musb->io.fifo_offset(i);
   1681
   1682			if (i == 0)
   1683				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
   1684			else
   1685				hw_ep->conf = mbase + 0x400 +
   1686					(((i - 1) & 0xf) << 2);
   1687		}
   1688#endif
   1689
   1690		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
   1691		hw_ep->rx_reinit = 1;
   1692		hw_ep->tx_reinit = 1;
   1693
   1694		if (hw_ep->max_packet_sz_tx) {
   1695			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
   1696				musb_driver_name, i,
   1697				hw_ep->is_shared_fifo ? "shared" : "tx",
   1698				hw_ep->tx_double_buffered
   1699					? "doublebuffer, " : "",
   1700				hw_ep->max_packet_sz_tx);
   1701		}
   1702		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
   1703			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
   1704				musb_driver_name, i,
   1705				"rx",
   1706				hw_ep->rx_double_buffered
   1707					? "doublebuffer, " : "",
   1708				hw_ep->max_packet_sz_rx);
   1709		}
   1710		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
   1711			musb_dbg(musb, "hw_ep %d not configured", i);
   1712	}
   1713
   1714	return 0;
   1715}
   1716
   1717/*-------------------------------------------------------------------------*/
   1718
   1719/*
   1720 * handle all the irqs defined by the HDRC core. for now we expect:  other
   1721 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
   1722 * will be assigned, and the irq will already have been acked.
   1723 *
   1724 * called in irq context with spinlock held, irqs blocked
   1725 */
   1726irqreturn_t musb_interrupt(struct musb *musb)
   1727{
   1728	irqreturn_t	retval = IRQ_NONE;
   1729	unsigned long	status;
   1730	unsigned long	epnum;
   1731	u8		devctl;
   1732
   1733	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
   1734		return IRQ_NONE;
   1735
   1736	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   1737
   1738	trace_musb_isr(musb);
   1739
    1740	/*
   1741	 * According to Mentor Graphics' documentation, flowchart on page 98,
   1742	 * IRQ should be handled as follows:
   1743	 *
   1744	 * . Resume IRQ
   1745	 * . Session Request IRQ
   1746	 * . VBUS Error IRQ
   1747	 * . Suspend IRQ
   1748	 * . Connect IRQ
   1749	 * . Disconnect IRQ
   1750	 * . Reset/Babble IRQ
   1751	 * . SOF IRQ (we're not using this one)
   1752	 * . Endpoint 0 IRQ
   1753	 * . TX Endpoints
   1754	 * . RX Endpoints
   1755	 *
   1756	 * We will be following that flowchart in order to avoid any problems
    1757	 * that might arise with the internal Finite State Machine.
   1758	 */
   1759
   1760	if (musb->int_usb)
   1761		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
   1762
   1763	if (musb->int_tx & 1) {
   1764		if (is_host_active(musb))
   1765			retval |= musb_h_ep0_irq(musb);
   1766		else
   1767			retval |= musb_g_ep0_irq(musb);
   1768
   1769		/* we have just handled endpoint 0 IRQ, clear it */
   1770		musb->int_tx &= ~BIT(0);
   1771	}
   1772
   1773	status = musb->int_tx;
   1774
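	/* TX endpoints 1..15; the ep0 bit (bit 0) was cleared above */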
   1775	for_each_set_bit(epnum, &status, 16) {
   1776		retval = IRQ_HANDLED;
   1777		if (is_host_active(musb))
   1778			musb_host_tx(musb, epnum);
   1779		else
   1780			musb_g_tx(musb, epnum);
   1781	}
   1782
   1783	status = musb->int_rx;
   1784
   1785	for_each_set_bit(epnum, &status, 16) {
   1786		retval = IRQ_HANDLED;
   1787		if (is_host_active(musb))
   1788			musb_host_rx(musb, epnum);
   1789		else
   1790			musb_g_rx(musb, epnum);
   1791	}
   1792
   1793	return retval;
   1794}
   1795EXPORT_SYMBOL_GPL(musb_interrupt);
   1796
   1797#ifndef CONFIG_MUSB_PIO_ONLY
   1798static bool use_dma = true;
   1799
   1800/* "modprobe ... use_dma=0" etc */
   1801module_param(use_dma, bool, 0644);
   1802MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
   1803
   1804void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
   1805{
   1806	/* called with controller lock already held */
   1807
   1808	if (!epnum) {
   1809		if (!is_cppi_enabled(musb)) {
   1810			/* endpoint 0 */
   1811			if (is_host_active(musb))
   1812				musb_h_ep0_irq(musb);
   1813			else
   1814				musb_g_ep0_irq(musb);
   1815		}
   1816	} else {
   1817		/* endpoints 1..15 */
   1818		if (transmit) {
   1819			if (is_host_active(musb))
   1820				musb_host_tx(musb, epnum);
   1821			else
   1822				musb_g_tx(musb, epnum);
   1823		} else {
   1824			/* receive */
   1825			if (is_host_active(musb))
   1826				musb_host_rx(musb, epnum);
   1827			else
   1828				musb_g_rx(musb, epnum);
   1829		}
   1830	}
   1831}
   1832EXPORT_SYMBOL_GPL(musb_dma_completion);
   1833
   1834#else
   1835#define use_dma			0
   1836#endif
   1837
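/*
 * Installed by the glue layer via musb->ops->phy_callback; PHY drivers
 * (e.g. phy-twl4030-usb) report VBUS/ID changes through musb_mailbox(),
 * which forwards them here.
 */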
   1838static int (*musb_phy_callback)(enum musb_vbus_id_status status);
   1839
   1840/*
   1841 * musb_mailbox - optional phy notifier function
   1842 * @status phy state change
   1843 *
   1844 * Optionally gets called from the USB PHY. Note that the USB PHY must be
   1845 * disabled at the point the phy_callback is registered or unregistered.
   1846 */
   1847int musb_mailbox(enum musb_vbus_id_status status)
   1848{
   1849	if (musb_phy_callback)
   1850		return musb_phy_callback(status);
   1851
   1852	return -ENODEV;
    1853	}
   1854EXPORT_SYMBOL_GPL(musb_mailbox);
   1855
   1856/*-------------------------------------------------------------------------*/
   1857
   1858static ssize_t
   1859mode_show(struct device *dev, struct device_attribute *attr, char *buf)
   1860{
   1861	struct musb *musb = dev_to_musb(dev);
   1862	unsigned long flags;
   1863	int ret;
   1864
   1865	spin_lock_irqsave(&musb->lock, flags);
   1866	ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
   1867	spin_unlock_irqrestore(&musb->lock, flags);
   1868
   1869	return ret;
   1870}
   1871
   1872static ssize_t
   1873mode_store(struct device *dev, struct device_attribute *attr,
   1874		const char *buf, size_t n)
   1875{
   1876	struct musb	*musb = dev_to_musb(dev);
   1877	unsigned long	flags;
   1878	int		status;
   1879
   1880	spin_lock_irqsave(&musb->lock, flags);
   1881	if (sysfs_streq(buf, "host"))
   1882		status = musb_platform_set_mode(musb, MUSB_HOST);
   1883	else if (sysfs_streq(buf, "peripheral"))
   1884		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
   1885	else if (sysfs_streq(buf, "otg"))
   1886		status = musb_platform_set_mode(musb, MUSB_OTG);
   1887	else
   1888		status = -EINVAL;
   1889	spin_unlock_irqrestore(&musb->lock, flags);
   1890
   1891	return (status == 0) ? n : status;
   1892}
   1893static DEVICE_ATTR_RW(mode);
   1894
   1895static ssize_t
   1896vbus_store(struct device *dev, struct device_attribute *attr,
   1897		const char *buf, size_t n)
   1898{
   1899	struct musb	*musb = dev_to_musb(dev);
   1900	unsigned long	flags;
   1901	unsigned long	val;
   1902
   1903	if (sscanf(buf, "%lu", &val) < 1) {
   1904		dev_err(dev, "Invalid VBUS timeout ms value\n");
   1905		return -EINVAL;
   1906	}
   1907
   1908	spin_lock_irqsave(&musb->lock, flags);
   1909	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
    1910	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
   1911	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
   1912		musb->is_active = 0;
   1913	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
   1914	spin_unlock_irqrestore(&musb->lock, flags);
   1915
   1916	return n;
   1917}
   1918
   1919static ssize_t
   1920vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
   1921{
   1922	struct musb	*musb = dev_to_musb(dev);
   1923	unsigned long	flags;
   1924	unsigned long	val;
   1925	int		vbus;
   1926	u8		devctl;
   1927
   1928	pm_runtime_get_sync(dev);
   1929	spin_lock_irqsave(&musb->lock, flags);
   1930	val = musb->a_wait_bcon;
   1931	vbus = musb_platform_get_vbus_status(musb);
   1932	if (vbus < 0) {
   1933		/* Use default MUSB method by means of DEVCTL register */
   1934		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
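		/* VBUS field value 3 means "above VBus Valid", so report VBUS on */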
   1935		if ((devctl & MUSB_DEVCTL_VBUS)
   1936				== (3 << MUSB_DEVCTL_VBUS_SHIFT))
   1937			vbus = 1;
   1938		else
   1939			vbus = 0;
   1940	}
   1941	spin_unlock_irqrestore(&musb->lock, flags);
   1942	pm_runtime_put_sync(dev);
   1943
   1944	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
   1945			vbus ? "on" : "off", val);
   1946}
   1947static DEVICE_ATTR_RW(vbus);
   1948
   1949/* Gadget drivers can't know that a host is connected so they might want
   1950 * to start SRP, but users can.  This allows userspace to trigger SRP.
   1951 */
   1952static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
   1953		const char *buf, size_t n)
   1954{
   1955	struct musb	*musb = dev_to_musb(dev);
   1956	unsigned short	srp;
   1957
   1958	if (sscanf(buf, "%hu", &srp) != 1
   1959			|| (srp != 1)) {
   1960		dev_err(dev, "SRP: Value must be 1\n");
   1961		return -EINVAL;
   1962	}
   1963
   1964	if (srp == 1)
   1965		musb_g_wakeup(musb);
   1966
   1967	return n;
   1968}
   1969static DEVICE_ATTR_WO(srp);
   1970
   1971static struct attribute *musb_attrs[] = {
   1972	&dev_attr_mode.attr,
   1973	&dev_attr_vbus.attr,
   1974	&dev_attr_srp.attr,
   1975	NULL
   1976};
   1977ATTRIBUTE_GROUPS(musb);
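/*
 * These appear as sysfs attributes of the musb controller device, typically
 * under /sys/bus/platform/devices/musb-hdrc.*; e.g. (the exact path depends
 * on the platform device name):
 *
 *	echo peripheral > .../mode
 *	echo 1 > .../srp
 *	cat .../vbus
 */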
   1978
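/*
 * The numeric suffix is the DEVCTL value each quirk pattern decodes to,
 * e.g. 0x99 = B-device, VBUS above VBus Valid, session bit set.
 */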
   1979#define MUSB_QUIRK_B_INVALID_VBUS_91	(MUSB_DEVCTL_BDEVICE | \
   1980					 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
   1981					 MUSB_DEVCTL_SESSION)
   1982#define MUSB_QUIRK_B_DISCONNECT_99	(MUSB_DEVCTL_BDEVICE | \
   1983					 (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
   1984					 MUSB_DEVCTL_SESSION)
   1985#define MUSB_QUIRK_A_DISCONNECT_19	((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
   1986					 MUSB_DEVCTL_SESSION)
   1987
   1988static bool musb_state_needs_recheck(struct musb *musb, u8 devctl,
   1989				     const char *desc)
   1990{
   1991	if (musb->quirk_retries && !musb->flush_irq_work) {
   1992		trace_musb_state(musb, devctl, desc);
   1993		schedule_delayed_work(&musb->irq_work,
   1994				      msecs_to_jiffies(1000));
   1995		musb->quirk_retries--;
   1996
   1997		return true;
   1998	}
   1999
   2000	return false;
   2001}
   2002
   2003/*
   2004 * Check the musb devctl session bit to determine if we want to
   2005 * allow PM runtime for the device. In general, we want to keep things
   2006 * active when the session bit is set except after host disconnect.
   2007 *
   2008 * Only called from musb_irq_work. If this ever needs to get called
   2009 * elsewhere, proper locking must be implemented for musb->session.
   2010 */
   2011static void musb_pm_runtime_check_session(struct musb *musb)
   2012{
   2013	u8 devctl, s;
   2014	int error;
   2015
   2016	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   2017
   2018	/* Handle session status quirks first */
   2019	s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
   2020		MUSB_DEVCTL_HR;
   2021	switch (devctl & ~s) {
   2022	case MUSB_QUIRK_B_DISCONNECT_99:
   2023		musb_state_needs_recheck(musb, devctl,
   2024			"Poll devctl in case of suspend after disconnect");
   2025		break;
   2026	case MUSB_QUIRK_B_INVALID_VBUS_91:
   2027		if (musb_state_needs_recheck(musb, devctl,
   2028				"Poll devctl on invalid vbus, assume no session"))
   2029			return;
   2030		fallthrough;
   2031	case MUSB_QUIRK_A_DISCONNECT_19:
   2032		if (musb_state_needs_recheck(musb, devctl,
   2033				"Poll devctl on possible host mode disconnect"))
   2034			return;
   2035		if (!musb->session)
   2036			break;
   2037		trace_musb_state(musb, devctl, "Allow PM on possible host mode disconnect");
   2038		pm_runtime_mark_last_busy(musb->controller);
   2039		pm_runtime_put_autosuspend(musb->controller);
   2040		musb->session = false;
   2041		return;
   2042	default:
   2043		break;
   2044	}
   2045
   2046	/* No need to do anything if session has not changed */
   2047	s = devctl & MUSB_DEVCTL_SESSION;
   2048	if (s == musb->session)
   2049		return;
   2050
   2051	/* Block PM or allow PM? */
   2052	if (s) {
   2053		trace_musb_state(musb, devctl, "Block PM on active session");
   2054		error = pm_runtime_get_sync(musb->controller);
   2055		if (error < 0)
   2056			dev_err(musb->controller, "Could not enable: %i\n",
   2057				error);
   2058		musb->quirk_retries = 3;
   2059
   2060		/*
   2061		 * We can get a spurious MUSB_INTR_SESSREQ interrupt on start-up
   2062		 * in B-peripheral mode with nothing connected and the session
   2063		 * bit clears silently. Check status again in 3 seconds.
   2064		 */
   2065		if (devctl & MUSB_DEVCTL_BDEVICE)
   2066			schedule_delayed_work(&musb->irq_work,
   2067					      msecs_to_jiffies(3000));
   2068	} else {
   2069		trace_musb_state(musb, devctl, "Allow PM with no session");
   2070		pm_runtime_mark_last_busy(musb->controller);
   2071		pm_runtime_put_autosuspend(musb->controller);
   2072	}
   2073
   2074	musb->session = s;
   2075}
   2076
   2077/* Only used to provide driver mode change events */
   2078static void musb_irq_work(struct work_struct *data)
   2079{
   2080	struct musb *musb = container_of(data, struct musb, irq_work.work);
   2081	int error;
   2082
   2083	error = pm_runtime_resume_and_get(musb->controller);
   2084	if (error < 0) {
   2085		dev_err(musb->controller, "Could not enable: %i\n", error);
   2086
   2087		return;
   2088	}
   2089
   2090	musb_pm_runtime_check_session(musb);
   2091
   2092	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
   2093		musb->xceiv_old_state = musb->xceiv->otg->state;
   2094		sysfs_notify(&musb->controller->kobj, NULL, "mode");
   2095	}
   2096
   2097	pm_runtime_mark_last_busy(musb->controller);
   2098	pm_runtime_put_autosuspend(musb->controller);
   2099}
   2100
   2101static void musb_recover_from_babble(struct musb *musb)
   2102{
   2103	int ret;
   2104	u8 devctl;
   2105
   2106	musb_disable_interrupts(musb);
   2107
   2108	/*
    2109	 * wait at least 320 cycles of the 60MHz clock. That's 5.3us; we give
    2110	 * it some slack and wait for 10us.
   2111	 */
   2112	udelay(10);
   2113
   2114	ret  = musb_platform_recover(musb);
   2115	if (ret) {
   2116		musb_enable_interrupts(musb);
   2117		return;
   2118	}
   2119
   2120	/* drop session bit */
   2121	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   2122	devctl &= ~MUSB_DEVCTL_SESSION;
   2123	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
   2124
   2125	/* tell usbcore about it */
   2126	musb_root_disconnect(musb);
   2127
   2128	/*
   2129	 * When a babble condition occurs, the musb controller
   2130	 * removes the session bit and the endpoint config is lost.
   2131	 */
   2132	if (musb->dyn_fifo)
   2133		ret = ep_config_from_table(musb);
   2134	else
   2135		ret = ep_config_from_hw(musb);
   2136
   2137	/* restart session */
   2138	if (ret == 0)
   2139		musb_start(musb);
   2140}
   2141
   2142/* --------------------------------------------------------------------------
   2143 * Init support
   2144 */
   2145
   2146static struct musb *allocate_instance(struct device *dev,
   2147		const struct musb_hdrc_config *config, void __iomem *mbase)
   2148{
   2149	struct musb		*musb;
   2150	struct musb_hw_ep	*ep;
   2151	int			epnum;
   2152	int			ret;
   2153
   2154	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
   2155	if (!musb)
   2156		return NULL;
   2157
   2158	INIT_LIST_HEAD(&musb->control);
   2159	INIT_LIST_HEAD(&musb->in_bulk);
   2160	INIT_LIST_HEAD(&musb->out_bulk);
   2161	INIT_LIST_HEAD(&musb->pending_list);
   2162
   2163	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
   2164	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
   2165	musb->mregs = mbase;
   2166	musb->ctrl_base = mbase;
   2167	musb->nIrq = -ENODEV;
   2168	musb->config = config;
   2169	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
   2170	for (epnum = 0, ep = musb->endpoints;
   2171			epnum < musb->config->num_eps;
   2172			epnum++, ep++) {
   2173		ep->musb = musb;
   2174		ep->epnum = epnum;
   2175	}
   2176
   2177	musb->controller = dev;
   2178
   2179	ret = musb_host_alloc(musb);
   2180	if (ret < 0)
   2181		goto err_free;
   2182
   2183	dev_set_drvdata(dev, musb);
   2184
   2185	return musb;
   2186
   2187err_free:
   2188	return NULL;
   2189}
   2190
   2191static void musb_free(struct musb *musb)
   2192{
   2193	/* this has multiple entry modes. it handles fault cleanup after
   2194	 * probe(), where things may be partially set up, as well as rmmod
   2195	 * cleanup after everything's been de-activated.
   2196	 */
   2197
   2198	if (musb->nIrq >= 0) {
   2199		if (musb->irq_wake)
   2200			disable_irq_wake(musb->nIrq);
   2201		free_irq(musb->nIrq, musb);
   2202	}
   2203
   2204	musb_host_free(musb);
   2205}
   2206
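/*
 * Resume work deferred by musb_queue_resume_work() while the controller is
 * runtime suspended; it runs from musb_run_resume_work() on resume.
 */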
   2207struct musb_pending_work {
   2208	int (*callback)(struct musb *musb, void *data);
   2209	void *data;
   2210	struct list_head node;
   2211};
   2212
   2213#ifdef CONFIG_PM
   2214/*
   2215 * Called from musb_runtime_resume(), musb_resume(), and
   2216 * musb_queue_resume_work(). Callers must take musb->lock.
   2217 */
   2218static int musb_run_resume_work(struct musb *musb)
   2219{
   2220	struct musb_pending_work *w, *_w;
   2221	unsigned long flags;
   2222	int error = 0;
   2223
   2224	spin_lock_irqsave(&musb->list_lock, flags);
   2225	list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
   2226		if (w->callback) {
   2227			error = w->callback(musb, w->data);
   2228			if (error < 0) {
   2229				dev_err(musb->controller,
   2230					"resume callback %p failed: %i\n",
   2231					w->callback, error);
   2232			}
   2233		}
   2234		list_del(&w->node);
   2235		devm_kfree(musb->controller, w);
   2236	}
   2237	spin_unlock_irqrestore(&musb->list_lock, flags);
   2238
   2239	return error;
   2240}
   2241#endif
   2242
    2243	/*
    2244	 * Called to run work if device is active or else queue the work to happen
    2245	 * on resume. Caller must take musb->lock and must hold an RPM reference.
    2246	 *
    2247	 * If the controller is runtime suspended, the callback is queued on
    2248	 * musb->pending_list and runs later from musb_run_resume_work();
    2249	 * otherwise it is called right away.
    2250	 */
   2251int musb_queue_resume_work(struct musb *musb,
   2252			   int (*callback)(struct musb *musb, void *data),
   2253			   void *data)
   2254{
   2255	struct musb_pending_work *w;
   2256	unsigned long flags;
   2257	bool is_suspended;
   2258	int error;
   2259
   2260	if (WARN_ON(!callback))
   2261		return -EINVAL;
   2262
   2263	spin_lock_irqsave(&musb->list_lock, flags);
   2264	is_suspended = musb->is_runtime_suspended;
   2265
   2266	if (is_suspended) {
   2267		w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
   2268		if (!w) {
   2269			error = -ENOMEM;
   2270			goto out_unlock;
   2271		}
   2272
   2273		w->callback = callback;
   2274		w->data = data;
   2275
   2276		list_add_tail(&w->node, &musb->pending_list);
   2277		error = 0;
   2278	}
   2279
   2280out_unlock:
   2281	spin_unlock_irqrestore(&musb->list_lock, flags);
   2282
   2283	if (!is_suspended)
   2284		error = callback(musb, data);
   2285
   2286	return error;
   2287}
   2288EXPORT_SYMBOL_GPL(musb_queue_resume_work);
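/*
 * Sketch of intended use (the callback name is illustrative only):
 *
 *	static int cb(struct musb *musb, void *data)
 *	{
 *		return 0;	<-- register accesses that need the core awake
 *	}
 *
 *	musb_queue_resume_work(musb, cb, NULL);
 *
 * This runs cb() right away if the core is active, or defers it until the
 * next (runtime) resume.
 */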
   2289
   2290static void musb_deassert_reset(struct work_struct *work)
   2291{
   2292	struct musb *musb;
   2293	unsigned long flags;
   2294
   2295	musb = container_of(work, struct musb, deassert_reset_work.work);
   2296
   2297	spin_lock_irqsave(&musb->lock, flags);
   2298
   2299	if (musb->port1_status & USB_PORT_STAT_RESET)
   2300		musb_port_reset(musb, false);
   2301
   2302	spin_unlock_irqrestore(&musb->lock, flags);
   2303}
   2304
   2305/*
   2306 * Perform generic per-controller initialization.
   2307 *
   2308 * @dev: the controller (already clocked, etc)
   2309 * @nIrq: IRQ number
   2310 * @ctrl: virtual address of controller registers,
   2311 *	not yet corrected for platform-specific offsets
   2312 */
   2313static int
   2314musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
   2315{
   2316	int			status;
   2317	struct musb		*musb;
   2318	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
   2319
   2320	/* The driver might handle more features than the board; OK.
   2321	 * Fail when the board needs a feature that's not enabled.
   2322	 */
   2323	if (!plat) {
   2324		dev_err(dev, "no platform_data?\n");
   2325		status = -ENODEV;
   2326		goto fail0;
   2327	}
   2328
   2329	/* allocate */
   2330	musb = allocate_instance(dev, plat->config, ctrl);
   2331	if (!musb) {
   2332		status = -ENOMEM;
   2333		goto fail0;
   2334	}
   2335
   2336	spin_lock_init(&musb->lock);
   2337	spin_lock_init(&musb->list_lock);
   2338	musb->board_set_power = plat->set_power;
   2339	musb->min_power = plat->min_power;
   2340	musb->ops = plat->platform_ops;
   2341	musb->port_mode = plat->mode;
   2342
   2343	/*
   2344	 * Initialize the default IO functions. At least omap2430 needs
   2345	 * these early. We initialize the platform specific IO functions
   2346	 * later on.
   2347	 */
   2348	musb_readb = musb_default_readb;
   2349	musb_writeb = musb_default_writeb;
   2350	musb_readw = musb_default_readw;
   2351	musb_writew = musb_default_writew;
   2352
   2353	/* The musb_platform_init() call:
   2354	 *   - adjusts musb->mregs
   2355	 *   - sets the musb->isr
   2356	 *   - may initialize an integrated transceiver
   2357	 *   - initializes musb->xceiv, usually by otg_get_phy()
   2358	 *   - stops powering VBUS
   2359	 *
   2360	 * There are various transceiver configurations.
   2361	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
   2362	 * external/discrete ones in various flavors (twl4030 family,
   2363	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
   2364	 */
   2365	status = musb_platform_init(musb);
   2366	if (status < 0)
   2367		goto fail1;
   2368
   2369	if (!musb->isr) {
   2370		status = -ENODEV;
   2371		goto fail2;
   2372	}
   2373
   2374
   2375	/* Most devices use indexed offset or flat offset */
   2376	if (musb->ops->quirks & MUSB_INDEXED_EP) {
   2377		musb->io.ep_offset = musb_indexed_ep_offset;
   2378		musb->io.ep_select = musb_indexed_ep_select;
   2379	} else {
   2380		musb->io.ep_offset = musb_flat_ep_offset;
   2381		musb->io.ep_select = musb_flat_ep_select;
   2382	}
   2383
   2384	if (musb->ops->quirks & MUSB_G_NO_SKB_RESERVE)
   2385		musb->g.quirk_avoids_skb_reserve = 1;
   2386
   2387	/* At least tusb6010 has its own offsets */
   2388	if (musb->ops->ep_offset)
   2389		musb->io.ep_offset = musb->ops->ep_offset;
   2390	if (musb->ops->ep_select)
   2391		musb->io.ep_select = musb->ops->ep_select;
   2392
   2393	if (musb->ops->fifo_mode)
   2394		fifo_mode = musb->ops->fifo_mode;
   2395	else
   2396		fifo_mode = 4;
   2397
   2398	if (musb->ops->fifo_offset)
   2399		musb->io.fifo_offset = musb->ops->fifo_offset;
   2400	else
   2401		musb->io.fifo_offset = musb_default_fifo_offset;
   2402
   2403	if (musb->ops->busctl_offset)
   2404		musb->io.busctl_offset = musb->ops->busctl_offset;
   2405	else
   2406		musb->io.busctl_offset = musb_default_busctl_offset;
   2407
   2408	if (musb->ops->readb)
   2409		musb_readb = musb->ops->readb;
   2410	if (musb->ops->writeb)
   2411		musb_writeb = musb->ops->writeb;
   2412	if (musb->ops->clearb)
   2413		musb_clearb = musb->ops->clearb;
   2414	else
   2415		musb_clearb = musb_readb;
   2416
   2417	if (musb->ops->readw)
   2418		musb_readw = musb->ops->readw;
   2419	if (musb->ops->writew)
   2420		musb_writew = musb->ops->writew;
   2421	if (musb->ops->clearw)
   2422		musb_clearw = musb->ops->clearw;
   2423	else
   2424		musb_clearw = musb_readw;
   2425
   2426#ifndef CONFIG_MUSB_PIO_ONLY
   2427	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
   2428		dev_err(dev, "DMA controller not set\n");
   2429		status = -ENODEV;
   2430		goto fail2;
   2431	}
   2432	musb_dma_controller_create = musb->ops->dma_init;
   2433	musb_dma_controller_destroy = musb->ops->dma_exit;
   2434#endif
   2435
   2436	if (musb->ops->read_fifo)
   2437		musb->io.read_fifo = musb->ops->read_fifo;
   2438	else
   2439		musb->io.read_fifo = musb_default_read_fifo;
   2440
   2441	if (musb->ops->write_fifo)
   2442		musb->io.write_fifo = musb->ops->write_fifo;
   2443	else
   2444		musb->io.write_fifo = musb_default_write_fifo;
   2445
   2446	if (musb->ops->get_toggle)
   2447		musb->io.get_toggle = musb->ops->get_toggle;
   2448	else
   2449		musb->io.get_toggle = musb_default_get_toggle;
   2450
   2451	if (musb->ops->set_toggle)
   2452		musb->io.set_toggle = musb->ops->set_toggle;
   2453	else
   2454		musb->io.set_toggle = musb_default_set_toggle;
   2455
   2456	if (!musb->xceiv->io_ops) {
   2457		musb->xceiv->io_dev = musb->controller;
   2458		musb->xceiv->io_priv = musb->mregs;
   2459		musb->xceiv->io_ops = &musb_ulpi_access;
   2460	}
   2461
   2462	if (musb->ops->phy_callback)
   2463		musb_phy_callback = musb->ops->phy_callback;
   2464
   2465	/*
   2466	 * We need musb_read/write functions initialized for PM.
   2467	 * Note that at least 2430 glue needs autosuspend delay
   2468	 * somewhere above 300 ms for the hardware to idle properly
   2469	 * after disconnecting the cable in host mode. Let's use
   2470	 * 500 ms for some margin.
   2471	 */
   2472	pm_runtime_use_autosuspend(musb->controller);
   2473	pm_runtime_set_autosuspend_delay(musb->controller, 500);
   2474	pm_runtime_enable(musb->controller);
   2475	pm_runtime_get_sync(musb->controller);
   2476
   2477	status = usb_phy_init(musb->xceiv);
   2478	if (status < 0)
   2479		goto err_usb_phy_init;
   2480
   2481	if (use_dma && dev->dma_mask) {
   2482		musb->dma_controller =
   2483			musb_dma_controller_create(musb, musb->mregs);
   2484		if (IS_ERR(musb->dma_controller)) {
   2485			status = PTR_ERR(musb->dma_controller);
   2486			goto fail2_5;
   2487		}
   2488	}
   2489
   2490	/* be sure interrupts are disabled before connecting ISR */
   2491	musb_platform_disable(musb);
   2492	musb_disable_interrupts(musb);
   2493	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
   2494
    2495	/* MUSB_POWER_SOFTCONN might already be set; JZ4740 does this. */
   2496	musb_writeb(musb->mregs, MUSB_POWER, 0);
   2497
   2498	/* Init IRQ workqueue before request_irq */
   2499	INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
   2500	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
   2501	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
   2502
   2503	/* setup musb parts of the core (especially endpoints) */
   2504	status = musb_core_init(plat->config->multipoint
   2505			? MUSB_CONTROLLER_MHDRC
   2506			: MUSB_CONTROLLER_HDRC, musb);
   2507	if (status < 0)
   2508		goto fail3;
   2509
   2510	timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
   2511
   2512	/* attach to the IRQ */
   2513	if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
   2514		dev_err(dev, "request_irq %d failed!\n", nIrq);
   2515		status = -ENODEV;
   2516		goto fail3;
   2517	}
   2518	musb->nIrq = nIrq;
   2519	/* FIXME this handles wakeup irqs wrong */
   2520	if (enable_irq_wake(nIrq) == 0) {
   2521		musb->irq_wake = 1;
   2522		device_init_wakeup(dev, 1);
   2523	} else {
   2524		musb->irq_wake = 0;
   2525	}
   2526
   2527	/* program PHY to use external vBus if required */
   2528	if (plat->extvbus) {
   2529		u8 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
   2530		busctl |= MUSB_ULPI_USE_EXTVBUS;
   2531		musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
   2532	}
   2533
   2534	MUSB_DEV_MODE(musb);
   2535	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
   2536
   2537	switch (musb->port_mode) {
   2538	case MUSB_HOST:
   2539		status = musb_host_setup(musb, plat->power);
   2540		if (status < 0)
   2541			goto fail3;
   2542		status = musb_platform_set_mode(musb, MUSB_HOST);
   2543		break;
   2544	case MUSB_PERIPHERAL:
   2545		status = musb_gadget_setup(musb);
   2546		if (status < 0)
   2547			goto fail3;
   2548		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
   2549		break;
   2550	case MUSB_OTG:
   2551		status = musb_host_setup(musb, plat->power);
   2552		if (status < 0)
   2553			goto fail3;
   2554		status = musb_gadget_setup(musb);
   2555		if (status) {
   2556			musb_host_cleanup(musb);
   2557			goto fail3;
   2558		}
   2559		status = musb_platform_set_mode(musb, MUSB_OTG);
   2560		break;
   2561	default:
   2562		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
   2563		break;
   2564	}
   2565
   2566	if (status < 0)
   2567		goto fail3;
   2568
   2569	musb_init_debugfs(musb);
   2570
   2571	musb->is_initialized = 1;
   2572	pm_runtime_mark_last_busy(musb->controller);
   2573	pm_runtime_put_autosuspend(musb->controller);
   2574
   2575	return 0;
   2576
   2577fail3:
   2578	cancel_delayed_work_sync(&musb->irq_work);
   2579	cancel_delayed_work_sync(&musb->finish_resume_work);
   2580	cancel_delayed_work_sync(&musb->deassert_reset_work);
   2581	if (musb->dma_controller)
   2582		musb_dma_controller_destroy(musb->dma_controller);
   2583
   2584fail2_5:
   2585	usb_phy_shutdown(musb->xceiv);
   2586
   2587err_usb_phy_init:
   2588	pm_runtime_dont_use_autosuspend(musb->controller);
   2589	pm_runtime_put_sync(musb->controller);
   2590	pm_runtime_disable(musb->controller);
   2591
   2592fail2:
   2593	if (musb->irq_wake)
   2594		device_init_wakeup(dev, 0);
   2595	musb_platform_exit(musb);
   2596
   2597fail1:
   2598	if (status != -EPROBE_DEFER)
   2599		dev_err(musb->controller,
   2600			"%s failed with status %d\n", __func__, status);
   2601
   2602	musb_free(musb);
   2603
   2604fail0:
   2605
   2606	return status;
   2607
   2608}
   2609
   2610/*-------------------------------------------------------------------------*/
   2611
   2612/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
   2613 * bridge to a platform device; this driver then suffices.
   2614 */
   2615static int musb_probe(struct platform_device *pdev)
   2616{
   2617	struct device	*dev = &pdev->dev;
   2618	int		irq = platform_get_irq_byname(pdev, "mc");
   2619	void __iomem	*base;
   2620
   2621	if (irq <= 0)
   2622		return -ENODEV;
   2623
   2624	base = devm_platform_ioremap_resource(pdev, 0);
   2625	if (IS_ERR(base))
   2626		return PTR_ERR(base);
   2627
   2628	return musb_init_controller(dev, irq, base);
   2629}
   2630
   2631static int musb_remove(struct platform_device *pdev)
   2632{
   2633	struct device	*dev = &pdev->dev;
   2634	struct musb	*musb = dev_to_musb(dev);
   2635	unsigned long	flags;
   2636
   2637	/* this gets called on rmmod.
   2638	 *  - Host mode: host may still be active
   2639	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
   2640	 *  - OTG mode: both roles are deactivated (or never-activated)
   2641	 */
   2642	musb_exit_debugfs(musb);
   2643
   2644	cancel_delayed_work_sync(&musb->irq_work);
   2645	cancel_delayed_work_sync(&musb->finish_resume_work);
   2646	cancel_delayed_work_sync(&musb->deassert_reset_work);
   2647	pm_runtime_get_sync(musb->controller);
   2648	musb_host_cleanup(musb);
   2649	musb_gadget_cleanup(musb);
   2650
   2651	musb_platform_disable(musb);
   2652	spin_lock_irqsave(&musb->lock, flags);
   2653	musb_disable_interrupts(musb);
   2654	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
   2655	spin_unlock_irqrestore(&musb->lock, flags);
   2656	musb_platform_exit(musb);
   2657
   2658	pm_runtime_dont_use_autosuspend(musb->controller);
   2659	pm_runtime_put_sync(musb->controller);
   2660	pm_runtime_disable(musb->controller);
   2661	musb_phy_callback = NULL;
   2662	if (musb->dma_controller)
   2663		musb_dma_controller_destroy(musb->dma_controller);
   2664	usb_phy_shutdown(musb->xceiv);
   2665	musb_free(musb);
   2666	device_init_wakeup(dev, 0);
   2667	return 0;
   2668}
   2669
   2670#ifdef	CONFIG_PM
   2671
   2672static void musb_save_context(struct musb *musb)
   2673{
   2674	int i;
   2675	void __iomem *musb_base = musb->mregs;
   2676	void __iomem *epio;
   2677
   2678	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
   2679	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
   2680	musb->context.busctl = musb_readb(musb_base, MUSB_ULPI_BUSCONTROL);
   2681	musb->context.power = musb_readb(musb_base, MUSB_POWER);
   2682	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
   2683	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
   2684	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
   2685
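	/*
	 * The indexed registers (notably the dynamic FIFO size/address ones
	 * below) refer to whichever endpoint MUSB_INDEX selects, so write it
	 * before sampling each endpoint's state.
	 */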
   2686	for (i = 0; i < musb->config->num_eps; ++i) {
   2687		struct musb_hw_ep	*hw_ep;
   2688
   2689		hw_ep = &musb->endpoints[i];
   2690		if (!hw_ep)
   2691			continue;
   2692
   2693		epio = hw_ep->regs;
   2694		if (!epio)
   2695			continue;
   2696
   2697		musb_writeb(musb_base, MUSB_INDEX, i);
   2698		musb->context.index_regs[i].txmaxp =
   2699			musb_readw(epio, MUSB_TXMAXP);
   2700		musb->context.index_regs[i].txcsr =
   2701			musb_readw(epio, MUSB_TXCSR);
   2702		musb->context.index_regs[i].rxmaxp =
   2703			musb_readw(epio, MUSB_RXMAXP);
   2704		musb->context.index_regs[i].rxcsr =
   2705			musb_readw(epio, MUSB_RXCSR);
   2706
   2707		if (musb->dyn_fifo) {
   2708			musb->context.index_regs[i].txfifoadd =
   2709					musb_readw(musb_base, MUSB_TXFIFOADD);
   2710			musb->context.index_regs[i].rxfifoadd =
   2711					musb_readw(musb_base, MUSB_RXFIFOADD);
   2712			musb->context.index_regs[i].txfifosz =
   2713					musb_readb(musb_base, MUSB_TXFIFOSZ);
   2714			musb->context.index_regs[i].rxfifosz =
   2715					musb_readb(musb_base, MUSB_RXFIFOSZ);
   2716		}
   2717
   2718		musb->context.index_regs[i].txtype =
   2719			musb_readb(epio, MUSB_TXTYPE);
   2720		musb->context.index_regs[i].txinterval =
   2721			musb_readb(epio, MUSB_TXINTERVAL);
   2722		musb->context.index_regs[i].rxtype =
   2723			musb_readb(epio, MUSB_RXTYPE);
   2724		musb->context.index_regs[i].rxinterval =
   2725			musb_readb(epio, MUSB_RXINTERVAL);
   2726
   2727		musb->context.index_regs[i].txfunaddr =
   2728			musb_read_txfunaddr(musb, i);
   2729		musb->context.index_regs[i].txhubaddr =
   2730			musb_read_txhubaddr(musb, i);
   2731		musb->context.index_regs[i].txhubport =
   2732			musb_read_txhubport(musb, i);
   2733
   2734		musb->context.index_regs[i].rxfunaddr =
   2735			musb_read_rxfunaddr(musb, i);
   2736		musb->context.index_regs[i].rxhubaddr =
   2737			musb_read_rxhubaddr(musb, i);
   2738		musb->context.index_regs[i].rxhubport =
   2739			musb_read_rxhubport(musb, i);
   2740	}
   2741}
   2742
   2743static void musb_restore_context(struct musb *musb)
   2744{
   2745	int i;
   2746	void __iomem *musb_base = musb->mregs;
   2747	void __iomem *epio;
   2748	u8 power;
   2749
   2750	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
   2751	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
   2752	musb_writeb(musb_base, MUSB_ULPI_BUSCONTROL, musb->context.busctl);
   2753
   2754	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
   2755	power = musb_readb(musb_base, MUSB_POWER);
   2756	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
   2757	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
   2758	power |= musb->context.power;
   2759	musb_writeb(musb_base, MUSB_POWER, power);
   2760
   2761	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
   2762	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
   2763	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
   2764	if (musb->context.devctl & MUSB_DEVCTL_SESSION)
   2765		musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
   2766
   2767	for (i = 0; i < musb->config->num_eps; ++i) {
   2768		struct musb_hw_ep	*hw_ep;
   2769
   2770		hw_ep = &musb->endpoints[i];
   2771		if (!hw_ep)
   2772			continue;
   2773
   2774		epio = hw_ep->regs;
   2775		if (!epio)
   2776			continue;
   2777
   2778		musb_writeb(musb_base, MUSB_INDEX, i);
   2779		musb_writew(epio, MUSB_TXMAXP,
   2780			musb->context.index_regs[i].txmaxp);
   2781		musb_writew(epio, MUSB_TXCSR,
   2782			musb->context.index_regs[i].txcsr);
   2783		musb_writew(epio, MUSB_RXMAXP,
   2784			musb->context.index_regs[i].rxmaxp);
   2785		musb_writew(epio, MUSB_RXCSR,
   2786			musb->context.index_regs[i].rxcsr);
   2787
   2788		if (musb->dyn_fifo) {
   2789			musb_writeb(musb_base, MUSB_TXFIFOSZ,
   2790				musb->context.index_regs[i].txfifosz);
   2791			musb_writeb(musb_base, MUSB_RXFIFOSZ,
   2792				musb->context.index_regs[i].rxfifosz);
   2793			musb_writew(musb_base, MUSB_TXFIFOADD,
   2794				musb->context.index_regs[i].txfifoadd);
   2795			musb_writew(musb_base, MUSB_RXFIFOADD,
   2796				musb->context.index_regs[i].rxfifoadd);
   2797		}
   2798
   2799		musb_writeb(epio, MUSB_TXTYPE,
   2800				musb->context.index_regs[i].txtype);
   2801		musb_writeb(epio, MUSB_TXINTERVAL,
   2802				musb->context.index_regs[i].txinterval);
   2803		musb_writeb(epio, MUSB_RXTYPE,
   2804				musb->context.index_regs[i].rxtype);
    2805		musb_writeb(epio, MUSB_RXINTERVAL,
    2806				musb->context.index_regs[i].rxinterval);
   2808		musb_write_txfunaddr(musb, i,
   2809				musb->context.index_regs[i].txfunaddr);
   2810		musb_write_txhubaddr(musb, i,
   2811				musb->context.index_regs[i].txhubaddr);
   2812		musb_write_txhubport(musb, i,
   2813				musb->context.index_regs[i].txhubport);
   2814
   2815		musb_write_rxfunaddr(musb, i,
   2816				musb->context.index_regs[i].rxfunaddr);
   2817		musb_write_rxhubaddr(musb, i,
   2818				musb->context.index_regs[i].rxhubaddr);
   2819		musb_write_rxhubport(musb, i,
   2820				musb->context.index_regs[i].rxhubport);
   2821	}
   2822	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
   2823}
   2824
   2825static int musb_suspend(struct device *dev)
   2826{
   2827	struct musb	*musb = dev_to_musb(dev);
   2828	unsigned long	flags;
   2829	int ret;
   2830
   2831	ret = pm_runtime_get_sync(dev);
   2832	if (ret < 0) {
   2833		pm_runtime_put_noidle(dev);
   2834		return ret;
   2835	}
   2836
   2837	musb_platform_disable(musb);
   2838	musb_disable_interrupts(musb);
   2839
   2840	musb->flush_irq_work = true;
   2841	while (flush_delayed_work(&musb->irq_work))
   2842		;
   2843	musb->flush_irq_work = false;
   2844
   2845	if (!(musb->ops->quirks & MUSB_PRESERVE_SESSION))
   2846		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
   2847
   2848	WARN_ON(!list_empty(&musb->pending_list));
   2849
   2850	spin_lock_irqsave(&musb->lock, flags);
   2851
   2852	if (is_peripheral_active(musb)) {
   2853		/* FIXME force disconnect unless we know USB will wake
   2854		 * the system up quickly enough to respond ...
   2855		 */
   2856	} else if (is_host_active(musb)) {
   2857		/* we know all the children are suspended; sometimes
   2858		 * they will even be wakeup-enabled.
   2859		 */
   2860	}
   2861
   2862	musb_save_context(musb);
   2863
   2864	spin_unlock_irqrestore(&musb->lock, flags);
   2865	return 0;
   2866}
   2867
   2868static int musb_resume(struct device *dev)
   2869{
   2870	struct musb *musb = dev_to_musb(dev);
   2871	unsigned long flags;
   2872	int error;
   2873	u8 devctl;
   2874	u8 mask;
   2875
   2876	/*
    2877	 * For static CMOS like DaVinci, register values were preserved
    2878	 * unless for some reason the whole SoC powered down or the USB
   2879	 * module got reset through the PSC (vs just being disabled).
   2880	 *
   2881	 * For the DSPS glue layer though, a full register restore has to
   2882	 * be done. As it shouldn't harm other platforms, we do it
   2883	 * unconditionally.
   2884	 */
   2885
   2886	musb_restore_context(musb);
   2887
   2888	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   2889	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
   2890	if ((devctl & mask) != (musb->context.devctl & mask))
   2891		musb->port1_status = 0;
   2892
   2893	musb_enable_interrupts(musb);
   2894	musb_platform_enable(musb);
   2895
   2896	/* session might be disabled in suspend */
   2897	if (musb->port_mode == MUSB_HOST &&
   2898	    !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
   2899		devctl |= MUSB_DEVCTL_SESSION;
   2900		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
   2901	}
   2902
   2903	spin_lock_irqsave(&musb->lock, flags);
   2904	error = musb_run_resume_work(musb);
   2905	if (error)
   2906		dev_err(musb->controller, "resume work failed with %i\n",
   2907			error);
   2908	spin_unlock_irqrestore(&musb->lock, flags);
   2909
   2910	pm_runtime_mark_last_busy(dev);
   2911	pm_runtime_put_autosuspend(dev);
   2912
   2913	return 0;
   2914}
   2915
   2916static int musb_runtime_suspend(struct device *dev)
   2917{
   2918	struct musb	*musb = dev_to_musb(dev);
   2919
   2920	musb_save_context(musb);
   2921	musb->is_runtime_suspended = 1;
   2922
   2923	return 0;
   2924}
   2925
   2926static int musb_runtime_resume(struct device *dev)
   2927{
   2928	struct musb *musb = dev_to_musb(dev);
   2929	unsigned long flags;
   2930	int error;
   2931
    2932	/*
    2933	 * When pm_runtime_get_sync() is called for the first time during
    2934	 * driver init, some of the structures used by the restore function
    2935	 * are not yet initialized. The clock still needs to be enabled
    2936	 * before any register access though, so pm_runtime_get_sync()
    2937	 * has to be called anyway.
    2938	 * Also, a context restore without a prior save does not make
    2939	 * any sense.
    2940	 */
   2941	if (!musb->is_initialized)
   2942		return 0;
   2943
   2944	musb_restore_context(musb);
   2945
   2946	spin_lock_irqsave(&musb->lock, flags);
   2947	error = musb_run_resume_work(musb);
   2948	if (error)
   2949		dev_err(musb->controller, "resume work failed with %i\n",
   2950			error);
   2951	musb->is_runtime_suspended = 0;
   2952	spin_unlock_irqrestore(&musb->lock, flags);
   2953
   2954	return 0;
   2955}
   2956
   2957static const struct dev_pm_ops musb_dev_pm_ops = {
   2958	.suspend	= musb_suspend,
   2959	.resume		= musb_resume,
   2960	.runtime_suspend = musb_runtime_suspend,
   2961	.runtime_resume = musb_runtime_resume,
   2962};
   2963
   2964#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
   2965#else
   2966#define	MUSB_DEV_PM_OPS	NULL
   2967#endif
   2968
   2969static struct platform_driver musb_driver = {
   2970	.driver = {
   2971		.name		= musb_driver_name,
   2972		.bus		= &platform_bus_type,
   2973		.pm		= MUSB_DEV_PM_OPS,
   2974		.dev_groups	= musb_groups,
   2975	},
   2976	.probe		= musb_probe,
   2977	.remove		= musb_remove,
   2978};
   2979
   2980module_platform_driver(musb_driver);