cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

musb_gadget.c (54267B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * MUSB OTG driver peripheral support
      4 *
      5 * Copyright 2005 Mentor Graphics Corporation
      6 * Copyright (C) 2005-2006 by Texas Instruments
      7 * Copyright (C) 2006-2007 Nokia Corporation
      8 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
      9 */
     10
     11#include <linux/kernel.h>
     12#include <linux/list.h>
     13#include <linux/timer.h>
     14#include <linux/module.h>
     15#include <linux/smp.h>
     16#include <linux/spinlock.h>
     17#include <linux/delay.h>
     18#include <linux/dma-mapping.h>
     19#include <linux/slab.h>
     20
     21#include "musb_core.h"
     22#include "musb_trace.h"
     23
     24
     25/* ----------------------------------------------------------------------- */
     26
     27#define is_buffer_mapped(req) (is_dma_capable() && \
     28					(req->map_state != UN_MAPPED))
     29
      30/* Map the buffer for DMA */
     31
     32static inline void map_dma_buffer(struct musb_request *request,
     33			struct musb *musb, struct musb_ep *musb_ep)
     34{
     35	int compatible = true;
     36	struct dma_controller *dma = musb->dma_controller;
     37
     38	request->map_state = UN_MAPPED;
     39
     40	if (!is_dma_capable() || !musb_ep->dma)
     41		return;
     42
     43	/* Check if DMA engine can handle this request.
     44	 * DMA code must reject the USB request explicitly.
     45	 * Default behaviour is to map the request.
     46	 */
     47	if (dma->is_compatible)
     48		compatible = dma->is_compatible(musb_ep->dma,
     49				musb_ep->packet_sz, request->request.buf,
     50				request->request.length);
     51	if (!compatible)
     52		return;
     53
     54	if (request->request.dma == DMA_ADDR_INVALID) {
     55		dma_addr_t dma_addr;
     56		int ret;
     57
     58		dma_addr = dma_map_single(
     59				musb->controller,
     60				request->request.buf,
     61				request->request.length,
     62				request->tx
     63					? DMA_TO_DEVICE
     64					: DMA_FROM_DEVICE);
     65		ret = dma_mapping_error(musb->controller, dma_addr);
     66		if (ret)
     67			return;
     68
     69		request->request.dma = dma_addr;
     70		request->map_state = MUSB_MAPPED;
     71	} else {
     72		dma_sync_single_for_device(musb->controller,
     73			request->request.dma,
     74			request->request.length,
     75			request->tx
     76				? DMA_TO_DEVICE
     77				: DMA_FROM_DEVICE);
     78		request->map_state = PRE_MAPPED;
     79	}
     80}
     81
      82/* Unmap the buffer from DMA and hand it back to the CPU */
     83static inline void unmap_dma_buffer(struct musb_request *request,
     84				struct musb *musb)
     85{
     86	struct musb_ep *musb_ep = request->ep;
     87
     88	if (!is_buffer_mapped(request) || !musb_ep->dma)
     89		return;
     90
     91	if (request->request.dma == DMA_ADDR_INVALID) {
     92		dev_vdbg(musb->controller,
     93				"not unmapping a never mapped buffer\n");
     94		return;
     95	}
     96	if (request->map_state == MUSB_MAPPED) {
     97		dma_unmap_single(musb->controller,
     98			request->request.dma,
     99			request->request.length,
    100			request->tx
    101				? DMA_TO_DEVICE
    102				: DMA_FROM_DEVICE);
    103		request->request.dma = DMA_ADDR_INVALID;
    104	} else { /* PRE_MAPPED */
    105		dma_sync_single_for_cpu(musb->controller,
    106			request->request.dma,
    107			request->request.length,
    108			request->tx
    109				? DMA_TO_DEVICE
    110				: DMA_FROM_DEVICE);
    111	}
    112	request->map_state = UN_MAPPED;
    113}
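
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the two helpers above implement the usual streaming-DMA pattern from
 * the kernel DMA API.  Assuming a device "dev" and a kmalloc'ed buffer
 * "buf" of "len" bytes being transmitted, the pattern is roughly:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_hardware_transfer(dma, len);	// hypothetical helper
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * For buffers the caller already mapped (PRE_MAPPED above), only the
 * ownership handoffs are needed: dma_sync_single_for_device() before the
 * transfer and dma_sync_single_for_cpu() after it.
 */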
    114
    115/*
    116 * Immediately complete a request.
    117 *
    118 * @param request the request to complete
    119 * @param status the status to complete the request with
    120 * Context: controller locked, IRQs blocked.
    121 */
    122void musb_g_giveback(
    123	struct musb_ep		*ep,
    124	struct usb_request	*request,
    125	int			status)
    126__releases(ep->musb->lock)
    127__acquires(ep->musb->lock)
    128{
    129	struct musb_request	*req;
    130	struct musb		*musb;
    131	int			busy = ep->busy;
    132
    133	req = to_musb_request(request);
    134
    135	list_del(&req->list);
    136	if (req->request.status == -EINPROGRESS)
    137		req->request.status = status;
    138	musb = req->musb;
    139
    140	ep->busy = 1;
    141	spin_unlock(&musb->lock);
    142
    143	if (!dma_mapping_error(&musb->g.dev, request->dma))
    144		unmap_dma_buffer(req, musb);
    145
    146	trace_musb_req_gb(req);
    147	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
    148	spin_lock(&musb->lock);
    149	ep->busy = busy;
    150}
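
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * usb_gadget_giveback_request() above ends up calling the function
 * driver's req->complete() callback with the controller lock dropped.
 * A typical completion handler (the name "my_complete" is hypothetical)
 * might look like:
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status)
 *			pr_debug("%s: failed, status %d\n", ep->name, req->status);
 *		else
 *			pr_debug("%s: moved %u of %u bytes\n", ep->name,
 *				 req->actual, req->length);
 *	}
 */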
    151
    152/* ----------------------------------------------------------------------- */
    153
    154/*
     155 * Abort requests queued to an endpoint, completing them with the given status.
     156 * Synchronous. The caller has locked the controller, blocked IRQs, and selected this ep.
    157 */
    158static void nuke(struct musb_ep *ep, const int status)
    159{
    160	struct musb		*musb = ep->musb;
    161	struct musb_request	*req = NULL;
    162	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
    163
    164	ep->busy = 1;
    165
    166	if (is_dma_capable() && ep->dma) {
    167		struct dma_controller	*c = ep->musb->dma_controller;
    168		int value;
    169
    170		if (ep->is_in) {
    171			/*
    172			 * The programming guide says that we must not clear
    173			 * the DMAMODE bit before DMAENAB, so we only
    174			 * clear it in the second write...
    175			 */
    176			musb_writew(epio, MUSB_TXCSR,
    177				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
    178			musb_writew(epio, MUSB_TXCSR,
    179					0 | MUSB_TXCSR_FLUSHFIFO);
    180		} else {
    181			musb_writew(epio, MUSB_RXCSR,
    182					0 | MUSB_RXCSR_FLUSHFIFO);
    183			musb_writew(epio, MUSB_RXCSR,
    184					0 | MUSB_RXCSR_FLUSHFIFO);
    185		}
    186
    187		value = c->channel_abort(ep->dma);
    188		musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
    189		c->channel_release(ep->dma);
    190		ep->dma = NULL;
    191	}
    192
    193	while (!list_empty(&ep->req_list)) {
    194		req = list_first_entry(&ep->req_list, struct musb_request, list);
    195		musb_g_giveback(ep, &req->request, status);
    196	}
    197}
    198
    199/* ----------------------------------------------------------------------- */
    200
    201/* Data transfers - pure PIO, pure DMA, or mixed mode */
    202
    203/*
    204 * This assumes the separate CPPI engine is responding to DMA requests
    205 * from the usb core ... sequenced a bit differently from mentor dma.
    206 */
    207
    208static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
    209{
    210	if (can_bulk_split(musb, ep->type))
    211		return ep->hw_ep->max_packet_sz_tx;
    212	else
    213		return ep->packet_sz;
    214}
    215
    216/*
    217 * An endpoint is transmitting data. This can be called either from
    218 * the IRQ routine or from ep.queue() to kickstart a request on an
    219 * endpoint.
    220 *
    221 * Context: controller locked, IRQs blocked, endpoint selected
    222 */
    223static void txstate(struct musb *musb, struct musb_request *req)
    224{
    225	u8			epnum = req->epnum;
    226	struct musb_ep		*musb_ep;
    227	void __iomem		*epio = musb->endpoints[epnum].regs;
    228	struct usb_request	*request;
    229	u16			fifo_count = 0, csr;
    230	int			use_dma = 0;
    231
    232	musb_ep = req->ep;
    233
    234	/* Check if EP is disabled */
    235	if (!musb_ep->desc) {
    236		musb_dbg(musb, "ep:%s disabled - ignore request",
    237						musb_ep->end_point.name);
    238		return;
    239	}
    240
    241	/* we shouldn't get here while DMA is active ... but we do ... */
    242	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
    243		musb_dbg(musb, "dma pending...");
    244		return;
    245	}
    246
    247	/* read TXCSR before */
    248	csr = musb_readw(epio, MUSB_TXCSR);
    249
    250	request = &req->request;
    251	fifo_count = min(max_ep_writesize(musb, musb_ep),
    252			(int)(request->length - request->actual));
    253
    254	if (csr & MUSB_TXCSR_TXPKTRDY) {
     255		musb_dbg(musb, "%s old packet still ready, txcsr %03x",
    256				musb_ep->end_point.name, csr);
    257		return;
    258	}
    259
    260	if (csr & MUSB_TXCSR_P_SENDSTALL) {
    261		musb_dbg(musb, "%s stalling, txcsr %03x",
    262				musb_ep->end_point.name, csr);
    263		return;
    264	}
    265
    266	musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
    267			epnum, musb_ep->packet_sz, fifo_count,
    268			csr);
    269
    270#ifndef	CONFIG_MUSB_PIO_ONLY
    271	if (is_buffer_mapped(req)) {
    272		struct dma_controller	*c = musb->dma_controller;
    273		size_t request_size;
    274
    275		/* setup DMA, then program endpoint CSR */
    276		request_size = min_t(size_t, request->length - request->actual,
    277					musb_ep->dma->max_len);
    278
    279		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
    280
    281		/* MUSB_TXCSR_P_ISO is still set correctly */
    282
    283		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
    284			if (request_size < musb_ep->packet_sz)
    285				musb_ep->dma->desired_mode = 0;
    286			else
    287				musb_ep->dma->desired_mode = 1;
    288
    289			use_dma = use_dma && c->channel_program(
    290					musb_ep->dma, musb_ep->packet_sz,
    291					musb_ep->dma->desired_mode,
    292					request->dma + request->actual, request_size);
    293			if (use_dma) {
    294				if (musb_ep->dma->desired_mode == 0) {
    295					/*
    296					 * We must not clear the DMAMODE bit
    297					 * before the DMAENAB bit -- and the
    298					 * latter doesn't always get cleared
    299					 * before we get here...
    300					 */
    301					csr &= ~(MUSB_TXCSR_AUTOSET
    302						| MUSB_TXCSR_DMAENAB);
    303					musb_writew(epio, MUSB_TXCSR, csr
    304						| MUSB_TXCSR_P_WZC_BITS);
    305					csr &= ~MUSB_TXCSR_DMAMODE;
    306					csr |= (MUSB_TXCSR_DMAENAB |
    307							MUSB_TXCSR_MODE);
    308					/* against programming guide */
    309				} else {
    310					csr |= (MUSB_TXCSR_DMAENAB
    311							| MUSB_TXCSR_DMAMODE
    312							| MUSB_TXCSR_MODE);
    313					/*
    314					 * Enable Autoset according to table
    315					 * below
    316					 * bulk_split hb_mult	Autoset_Enable
    317					 *	0	0	Yes(Normal)
    318					 *	0	>0	No(High BW ISO)
    319					 *	1	0	Yes(HS bulk)
    320					 *	1	>0	Yes(FS bulk)
    321					 */
    322					if (!musb_ep->hb_mult ||
    323					    can_bulk_split(musb,
    324							   musb_ep->type))
    325						csr |= MUSB_TXCSR_AUTOSET;
    326				}
    327				csr &= ~MUSB_TXCSR_P_UNDERRUN;
    328
    329				musb_writew(epio, MUSB_TXCSR, csr);
    330			}
    331		}
    332
    333		if (is_cppi_enabled(musb)) {
    334			/* program endpoint CSR first, then setup DMA */
    335			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
    336			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
    337				MUSB_TXCSR_MODE;
    338			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
    339						~MUSB_TXCSR_P_UNDERRUN) | csr);
    340
    341			/* ensure writebuffer is empty */
    342			csr = musb_readw(epio, MUSB_TXCSR);
    343
    344			/*
    345			 * NOTE host side sets DMAENAB later than this; both are
    346			 * OK since the transfer dma glue (between CPPI and
    347			 * Mentor fifos) just tells CPPI it could start. Data
    348			 * only moves to the USB TX fifo when both fifos are
    349			 * ready.
    350			 */
    351			/*
    352			 * "mode" is irrelevant here; handle terminating ZLPs
    353			 * like PIO does, since the hardware RNDIS mode seems
    354			 * unreliable except for the
    355			 * last-packet-is-already-short case.
    356			 */
    357			use_dma = use_dma && c->channel_program(
    358					musb_ep->dma, musb_ep->packet_sz,
    359					0,
    360					request->dma + request->actual,
    361					request_size);
    362			if (!use_dma) {
    363				c->channel_release(musb_ep->dma);
    364				musb_ep->dma = NULL;
    365				csr &= ~MUSB_TXCSR_DMAENAB;
    366				musb_writew(epio, MUSB_TXCSR, csr);
     367				/* invariant: request->buf is non-null */
    368			}
    369		} else if (tusb_dma_omap(musb))
    370			use_dma = use_dma && c->channel_program(
    371					musb_ep->dma, musb_ep->packet_sz,
    372					request->zero,
    373					request->dma + request->actual,
    374					request_size);
    375	}
    376#endif
    377
    378	if (!use_dma) {
    379		/*
    380		 * Unmap the dma buffer back to cpu if dma channel
    381		 * programming fails
    382		 */
    383		unmap_dma_buffer(req, musb);
    384
    385		musb_write_fifo(musb_ep->hw_ep, fifo_count,
    386				(u8 *) (request->buf + request->actual));
    387		request->actual += fifo_count;
    388		csr |= MUSB_TXCSR_TXPKTRDY;
    389		csr &= ~MUSB_TXCSR_P_UNDERRUN;
    390		musb_writew(epio, MUSB_TXCSR, csr);
    391	}
    392
    393	/* host may already have the data when this message shows... */
    394	musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
    395			musb_ep->end_point.name, use_dma ? "dma" : "pio",
    396			request->actual, request->length,
    397			musb_readw(epio, MUSB_TXCSR),
    398			fifo_count,
    399			musb_readw(epio, MUSB_TXMAXP));
    400}
    401
    402/*
    403 * FIFO state update (e.g. data ready).
     404 * Called from IRQ, with controller locked.
    405 */
    406void musb_g_tx(struct musb *musb, u8 epnum)
    407{
    408	u16			csr;
    409	struct musb_request	*req;
    410	struct usb_request	*request;
    411	u8 __iomem		*mbase = musb->mregs;
    412	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
    413	void __iomem		*epio = musb->endpoints[epnum].regs;
    414	struct dma_channel	*dma;
    415
    416	musb_ep_select(mbase, epnum);
    417	req = next_request(musb_ep);
    418	request = &req->request;
    419
    420	csr = musb_readw(epio, MUSB_TXCSR);
    421	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
    422
    423	dma = is_dma_capable() ? musb_ep->dma : NULL;
    424
    425	/*
    426	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
    427	 * probably rates reporting as a host error.
    428	 */
    429	if (csr & MUSB_TXCSR_P_SENTSTALL) {
    430		csr |=	MUSB_TXCSR_P_WZC_BITS;
    431		csr &= ~MUSB_TXCSR_P_SENTSTALL;
    432		musb_writew(epio, MUSB_TXCSR, csr);
    433		return;
    434	}
    435
    436	if (csr & MUSB_TXCSR_P_UNDERRUN) {
    437		/* We NAKed, no big deal... little reason to care. */
    438		csr |=	 MUSB_TXCSR_P_WZC_BITS;
    439		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
    440		musb_writew(epio, MUSB_TXCSR, csr);
    441		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
    442				epnum, request);
    443	}
    444
    445	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
    446		/*
     447		 * SHOULD NOT HAPPEN... it has happened with CPPI though, after
    448		 * changing SENDSTALL (and other cases); harmless?
    449		 */
    450		musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
    451		return;
    452	}
    453
    454	if (req) {
    455
    456		trace_musb_req_tx(req);
    457
    458		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
    459			csr |= MUSB_TXCSR_P_WZC_BITS;
    460			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
    461				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
    462			musb_writew(epio, MUSB_TXCSR, csr);
    463			/* Ensure writebuffer is empty. */
    464			csr = musb_readw(epio, MUSB_TXCSR);
    465			request->actual += musb_ep->dma->actual_len;
    466			musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
    467				epnum, csr, musb_ep->dma->actual_len, request);
    468		}
    469
    470		/*
    471		 * First, maybe a terminating short packet. Some DMA
    472		 * engines might handle this by themselves.
    473		 */
    474		if ((request->zero && request->length)
    475			&& (request->length % musb_ep->packet_sz == 0)
    476			&& (request->actual == request->length)) {
    477
    478			/*
    479			 * On DMA completion, FIFO may not be
    480			 * available yet...
    481			 */
    482			if (csr & MUSB_TXCSR_TXPKTRDY)
    483				return;
    484
    485			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
    486					| MUSB_TXCSR_TXPKTRDY);
    487			request->zero = 0;
    488		}
    489
    490		if (request->actual == request->length) {
    491			musb_g_giveback(musb_ep, request, 0);
    492			/*
    493			 * In the giveback function the MUSB lock is
     494			 * released and acquired again after some time. During
    495			 * this time period the INDEX register could get
    496			 * changed by the gadget_queue function especially
    497			 * on SMP systems. Reselect the INDEX to be sure
    498			 * we are reading/modifying the right registers
    499			 */
    500			musb_ep_select(mbase, epnum);
    501			req = musb_ep->desc ? next_request(musb_ep) : NULL;
    502			if (!req) {
    503				musb_dbg(musb, "%s idle now",
    504					musb_ep->end_point.name);
    505				return;
    506			}
    507		}
    508
    509		txstate(musb, req);
    510	}
    511}
    512
    513/* ------------------------------------------------------------ */
    514
    515/*
    516 * Context: controller locked, IRQs blocked, endpoint selected
    517 */
    518static void rxstate(struct musb *musb, struct musb_request *req)
    519{
    520	const u8		epnum = req->epnum;
    521	struct usb_request	*request = &req->request;
    522	struct musb_ep		*musb_ep;
    523	void __iomem		*epio = musb->endpoints[epnum].regs;
    524	unsigned		len = 0;
    525	u16			fifo_count;
    526	u16			csr = musb_readw(epio, MUSB_RXCSR);
    527	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
    528	u8			use_mode_1;
    529
    530	if (hw_ep->is_shared_fifo)
    531		musb_ep = &hw_ep->ep_in;
    532	else
    533		musb_ep = &hw_ep->ep_out;
    534
    535	fifo_count = musb_ep->packet_sz;
    536
    537	/* Check if EP is disabled */
    538	if (!musb_ep->desc) {
    539		musb_dbg(musb, "ep:%s disabled - ignore request",
    540						musb_ep->end_point.name);
    541		return;
    542	}
    543
    544	/* We shouldn't get here while DMA is active, but we do... */
    545	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
    546		musb_dbg(musb, "DMA pending...");
    547		return;
    548	}
    549
    550	if (csr & MUSB_RXCSR_P_SENDSTALL) {
    551		musb_dbg(musb, "%s stalling, RXCSR %04x",
    552		    musb_ep->end_point.name, csr);
    553		return;
    554	}
    555
    556	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
    557		struct dma_controller	*c = musb->dma_controller;
    558		struct dma_channel	*channel = musb_ep->dma;
    559
    560		/* NOTE:  CPPI won't actually stop advancing the DMA
    561		 * queue after short packet transfers, so this is almost
    562		 * always going to run as IRQ-per-packet DMA so that
    563		 * faults will be handled correctly.
    564		 */
    565		if (c->channel_program(channel,
    566				musb_ep->packet_sz,
    567				!request->short_not_ok,
    568				request->dma + request->actual,
    569				request->length - request->actual)) {
    570
    571			/* make sure that if an rxpkt arrived after the irq,
    572			 * the cppi engine will be ready to take it as soon
    573			 * as DMA is enabled
    574			 */
    575			csr &= ~(MUSB_RXCSR_AUTOCLEAR
    576					| MUSB_RXCSR_DMAMODE);
    577			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
    578			musb_writew(epio, MUSB_RXCSR, csr);
    579			return;
    580		}
    581	}
    582
    583	if (csr & MUSB_RXCSR_RXPKTRDY) {
    584		fifo_count = musb_readw(epio, MUSB_RXCOUNT);
    585
    586		/*
    587		 * Enable Mode 1 on RX transfers only when short_not_ok flag
    588		 * is set. Currently short_not_ok flag is set only from
    589		 * file_storage and f_mass_storage drivers
    590		 */
    591
    592		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
    593			use_mode_1 = 1;
    594		else
    595			use_mode_1 = 0;
    596
    597		if (request->actual < request->length) {
    598			if (!is_buffer_mapped(req))
    599				goto buffer_aint_mapped;
    600
    601			if (musb_dma_inventra(musb)) {
    602				struct dma_controller	*c;
    603				struct dma_channel	*channel;
    604				int			use_dma = 0;
    605				unsigned int transfer_size;
    606
    607				c = musb->dma_controller;
    608				channel = musb_ep->dma;
    609
    610	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
    611	 * mode 0 only. So we do not get endpoint interrupts due to DMA
    612	 * completion. We only get interrupts from DMA controller.
    613	 *
    614	 * We could operate in DMA mode 1 if we knew the size of the transfer
    615	 * in advance. For mass storage class, request->length = what the host
    616	 * sends, so that'd work.  But for pretty much everything else,
    617	 * request->length is routinely more than what the host sends. For
     618	 * most of these gadgets, the end of transfer is signified either by a
     619	 * short packet or by filling the last byte of the buffer.  (Sending extra
     620	 * data in that last packet should trigger an overflow fault.)  But in mode 1,
    621	 * we don't get DMA completion interrupt for short packets.
    622	 *
    623	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
    624	 * to get endpoint interrupt on every DMA req, but that didn't seem
    625	 * to work reliably.
    626	 *
    627	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
    628	 * then becomes usable as a runtime "use mode 1" hint...
    629	 */
    630
    631				/* Experimental: Mode1 works with mass storage use cases */
    632				if (use_mode_1) {
    633					csr |= MUSB_RXCSR_AUTOCLEAR;
    634					musb_writew(epio, MUSB_RXCSR, csr);
    635					csr |= MUSB_RXCSR_DMAENAB;
    636					musb_writew(epio, MUSB_RXCSR, csr);
    637
    638					/*
    639					 * this special sequence (enabling and then
    640					 * disabling MUSB_RXCSR_DMAMODE) is required
    641					 * to get DMAReq to activate
    642					 */
    643					musb_writew(epio, MUSB_RXCSR,
    644						csr | MUSB_RXCSR_DMAMODE);
    645					musb_writew(epio, MUSB_RXCSR, csr);
    646
    647					transfer_size = min_t(unsigned int,
    648							request->length -
    649							request->actual,
    650							channel->max_len);
    651					musb_ep->dma->desired_mode = 1;
    652				} else {
    653					if (!musb_ep->hb_mult &&
    654						musb_ep->hw_ep->rx_double_buffered)
    655						csr |= MUSB_RXCSR_AUTOCLEAR;
    656					csr |= MUSB_RXCSR_DMAENAB;
    657					musb_writew(epio, MUSB_RXCSR, csr);
    658
    659					transfer_size = min(request->length - request->actual,
    660							(unsigned)fifo_count);
    661					musb_ep->dma->desired_mode = 0;
    662				}
    663
    664				use_dma = c->channel_program(
    665						channel,
    666						musb_ep->packet_sz,
    667						channel->desired_mode,
    668						request->dma
    669						+ request->actual,
    670						transfer_size);
    671
    672				if (use_dma)
    673					return;
    674			}
    675
    676			if ((musb_dma_ux500(musb)) &&
    677				(request->actual < request->length)) {
    678
    679				struct dma_controller *c;
    680				struct dma_channel *channel;
    681				unsigned int transfer_size = 0;
    682
    683				c = musb->dma_controller;
    684				channel = musb_ep->dma;
    685
    686				/* In case first packet is short */
    687				if (fifo_count < musb_ep->packet_sz)
    688					transfer_size = fifo_count;
    689				else if (request->short_not_ok)
    690					transfer_size =	min_t(unsigned int,
    691							request->length -
    692							request->actual,
    693							channel->max_len);
    694				else
    695					transfer_size = min_t(unsigned int,
    696							request->length -
    697							request->actual,
    698							(unsigned)fifo_count);
    699
    700				csr &= ~MUSB_RXCSR_DMAMODE;
    701				csr |= (MUSB_RXCSR_DMAENAB |
    702					MUSB_RXCSR_AUTOCLEAR);
    703
    704				musb_writew(epio, MUSB_RXCSR, csr);
    705
    706				if (transfer_size <= musb_ep->packet_sz) {
    707					musb_ep->dma->desired_mode = 0;
    708				} else {
    709					musb_ep->dma->desired_mode = 1;
    710					/* Mode must be set after DMAENAB */
    711					csr |= MUSB_RXCSR_DMAMODE;
    712					musb_writew(epio, MUSB_RXCSR, csr);
    713				}
    714
    715				if (c->channel_program(channel,
    716							musb_ep->packet_sz,
    717							channel->desired_mode,
    718							request->dma
    719							+ request->actual,
    720							transfer_size))
    721
    722					return;
    723			}
    724
    725			len = request->length - request->actual;
    726			musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
    727					musb_ep->end_point.name,
    728					fifo_count, len,
    729					musb_ep->packet_sz);
    730
    731			fifo_count = min_t(unsigned, len, fifo_count);
    732
    733			if (tusb_dma_omap(musb)) {
    734				struct dma_controller *c = musb->dma_controller;
    735				struct dma_channel *channel = musb_ep->dma;
    736				u32 dma_addr = request->dma + request->actual;
    737				int ret;
    738
    739				ret = c->channel_program(channel,
    740						musb_ep->packet_sz,
    741						channel->desired_mode,
    742						dma_addr,
    743						fifo_count);
    744				if (ret)
    745					return;
    746			}
    747
    748			/*
    749			 * Unmap the dma buffer back to cpu if dma channel
    750			 * programming fails. This buffer is mapped if the
    751			 * channel allocation is successful
    752			 */
    753			unmap_dma_buffer(req, musb);
    754
    755			/*
    756			 * Clear DMAENAB and AUTOCLEAR for the
    757			 * PIO mode transfer
    758			 */
    759			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
    760			musb_writew(epio, MUSB_RXCSR, csr);
    761
    762buffer_aint_mapped:
    763			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
    764					(request->buf + request->actual));
    765			request->actual += fifo_count;
    766
    767			/* REVISIT if we left anything in the fifo, flush
    768			 * it and report -EOVERFLOW
    769			 */
    770
    771			/* ack the read! */
    772			csr |= MUSB_RXCSR_P_WZC_BITS;
    773			csr &= ~MUSB_RXCSR_RXPKTRDY;
    774			musb_writew(epio, MUSB_RXCSR, csr);
    775		}
    776	}
    777
     778	/* reached the end, or a short packet was detected */
    779	if (request->actual == request->length ||
    780	    fifo_count < musb_ep->packet_sz)
    781		musb_g_giveback(musb_ep, request, 0);
    782}
    783
    784/*
    785 * Data ready for a request; called from IRQ
    786 */
    787void musb_g_rx(struct musb *musb, u8 epnum)
    788{
    789	u16			csr;
    790	struct musb_request	*req;
    791	struct usb_request	*request;
    792	void __iomem		*mbase = musb->mregs;
    793	struct musb_ep		*musb_ep;
    794	void __iomem		*epio = musb->endpoints[epnum].regs;
    795	struct dma_channel	*dma;
    796	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
    797
    798	if (hw_ep->is_shared_fifo)
    799		musb_ep = &hw_ep->ep_in;
    800	else
    801		musb_ep = &hw_ep->ep_out;
    802
    803	musb_ep_select(mbase, epnum);
    804
    805	req = next_request(musb_ep);
    806	if (!req)
    807		return;
    808
    809	trace_musb_req_rx(req);
    810	request = &req->request;
    811
    812	csr = musb_readw(epio, MUSB_RXCSR);
    813	dma = is_dma_capable() ? musb_ep->dma : NULL;
    814
    815	musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
    816			csr, dma ? " (dma)" : "", request);
    817
    818	if (csr & MUSB_RXCSR_P_SENTSTALL) {
    819		csr |= MUSB_RXCSR_P_WZC_BITS;
    820		csr &= ~MUSB_RXCSR_P_SENTSTALL;
    821		musb_writew(epio, MUSB_RXCSR, csr);
    822		return;
    823	}
    824
    825	if (csr & MUSB_RXCSR_P_OVERRUN) {
    826		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
    827		csr &= ~MUSB_RXCSR_P_OVERRUN;
    828		musb_writew(epio, MUSB_RXCSR, csr);
    829
    830		musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
    831		if (request->status == -EINPROGRESS)
    832			request->status = -EOVERFLOW;
    833	}
    834	if (csr & MUSB_RXCSR_INCOMPRX) {
    835		/* REVISIT not necessarily an error */
    836		musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
    837	}
    838
    839	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
    840		/* "should not happen"; likely RXPKTRDY pending for DMA */
    841		musb_dbg(musb, "%s busy, csr %04x",
    842			musb_ep->end_point.name, csr);
    843		return;
    844	}
    845
    846	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
    847		csr &= ~(MUSB_RXCSR_AUTOCLEAR
    848				| MUSB_RXCSR_DMAENAB
    849				| MUSB_RXCSR_DMAMODE);
    850		musb_writew(epio, MUSB_RXCSR,
    851			MUSB_RXCSR_P_WZC_BITS | csr);
    852
    853		request->actual += musb_ep->dma->actual_len;
    854
    855#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
    856	defined(CONFIG_USB_UX500_DMA)
    857		/* Autoclear doesn't clear RxPktRdy for short packets */
    858		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
    859				|| (dma->actual_len
    860					& (musb_ep->packet_sz - 1))) {
    861			/* ack the read! */
    862			csr &= ~MUSB_RXCSR_RXPKTRDY;
    863			musb_writew(epio, MUSB_RXCSR, csr);
    864		}
    865
    866		/* incomplete, and not short? wait for next IN packet */
    867		if ((request->actual < request->length)
    868				&& (musb_ep->dma->actual_len
    869					== musb_ep->packet_sz)) {
     870			/* In the double-buffered case, continue to unload the
     871			 * FIFO if there is an Rx packet in the FIFO.
     872			 */
    873			csr = musb_readw(epio, MUSB_RXCSR);
    874			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
    875				hw_ep->rx_double_buffered)
    876				goto exit;
    877			return;
    878		}
    879#endif
    880		musb_g_giveback(musb_ep, request, 0);
    881		/*
    882		 * In the giveback function the MUSB lock is
     883		 * released and acquired again after some time. During
    884		 * this time period the INDEX register could get
    885		 * changed by the gadget_queue function especially
    886		 * on SMP systems. Reselect the INDEX to be sure
    887		 * we are reading/modifying the right registers
    888		 */
    889		musb_ep_select(mbase, epnum);
    890
    891		req = next_request(musb_ep);
    892		if (!req)
    893			return;
    894	}
    895#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
    896	defined(CONFIG_USB_UX500_DMA)
    897exit:
    898#endif
    899	/* Analyze request */
    900	rxstate(musb, req);
    901}
    902
    903/* ------------------------------------------------------------ */
    904
    905static int musb_gadget_enable(struct usb_ep *ep,
    906			const struct usb_endpoint_descriptor *desc)
    907{
    908	unsigned long		flags;
    909	struct musb_ep		*musb_ep;
    910	struct musb_hw_ep	*hw_ep;
    911	void __iomem		*regs;
    912	struct musb		*musb;
    913	void __iomem	*mbase;
    914	u8		epnum;
    915	u16		csr;
    916	unsigned	tmp;
    917	int		status = -EINVAL;
    918
    919	if (!ep || !desc)
    920		return -EINVAL;
    921
    922	musb_ep = to_musb_ep(ep);
    923	hw_ep = musb_ep->hw_ep;
    924	regs = hw_ep->regs;
    925	musb = musb_ep->musb;
    926	mbase = musb->mregs;
    927	epnum = musb_ep->current_epnum;
    928
    929	spin_lock_irqsave(&musb->lock, flags);
    930
    931	if (musb_ep->desc) {
    932		status = -EBUSY;
    933		goto fail;
    934	}
    935	musb_ep->type = usb_endpoint_type(desc);
    936
    937	/* check direction and (later) maxpacket size against endpoint */
    938	if (usb_endpoint_num(desc) != epnum)
    939		goto fail;
    940
    941	/* REVISIT this rules out high bandwidth periodic transfers */
    942	tmp = usb_endpoint_maxp_mult(desc) - 1;
    943	if (tmp) {
    944		int ok;
    945
    946		if (usb_endpoint_dir_in(desc))
    947			ok = musb->hb_iso_tx;
    948		else
    949			ok = musb->hb_iso_rx;
    950
    951		if (!ok) {
    952			musb_dbg(musb, "no support for high bandwidth ISO");
    953			goto fail;
    954		}
    955		musb_ep->hb_mult = tmp;
    956	} else {
    957		musb_ep->hb_mult = 0;
    958	}
    959
    960	musb_ep->packet_sz = usb_endpoint_maxp(desc);
    961	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
    962
    963	/* enable the interrupts for the endpoint, set the endpoint
    964	 * packet size (or fail), set the mode, clear the fifo
    965	 */
    966	musb_ep_select(mbase, epnum);
    967	if (usb_endpoint_dir_in(desc)) {
    968
    969		if (hw_ep->is_shared_fifo)
    970			musb_ep->is_in = 1;
    971		if (!musb_ep->is_in)
    972			goto fail;
    973
    974		if (tmp > hw_ep->max_packet_sz_tx) {
    975			musb_dbg(musb, "packet size beyond hardware FIFO size");
    976			goto fail;
    977		}
    978
    979		musb->intrtxe |= (1 << epnum);
    980		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
    981
    982		/* REVISIT if can_bulk_split(), use by updating "tmp";
    983		 * likewise high bandwidth periodic tx
    984		 */
    985		/* Set TXMAXP with the FIFO size of the endpoint
    986		 * to disable double buffering mode.
    987		 */
    988		if (can_bulk_split(musb, musb_ep->type))
    989			musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
    990						musb_ep->packet_sz) - 1;
    991		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
    992				| (musb_ep->hb_mult << 11));
    993
    994		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
    995		if (musb_readw(regs, MUSB_TXCSR)
    996				& MUSB_TXCSR_FIFONOTEMPTY)
    997			csr |= MUSB_TXCSR_FLUSHFIFO;
    998		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
    999			csr |= MUSB_TXCSR_P_ISO;
   1000
   1001		/* set twice in case of double buffering */
   1002		musb_writew(regs, MUSB_TXCSR, csr);
   1003		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
   1004		musb_writew(regs, MUSB_TXCSR, csr);
   1005
   1006	} else {
   1007
   1008		if (hw_ep->is_shared_fifo)
   1009			musb_ep->is_in = 0;
   1010		if (musb_ep->is_in)
   1011			goto fail;
   1012
   1013		if (tmp > hw_ep->max_packet_sz_rx) {
   1014			musb_dbg(musb, "packet size beyond hardware FIFO size");
   1015			goto fail;
   1016		}
   1017
   1018		musb->intrrxe |= (1 << epnum);
   1019		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
   1020
   1021		/* REVISIT if can_bulk_combine() use by updating "tmp"
   1022		 * likewise high bandwidth periodic rx
   1023		 */
   1024		/* Set RXMAXP with the FIFO size of the endpoint
   1025		 * to disable double buffering mode.
   1026		 */
   1027		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
   1028				| (musb_ep->hb_mult << 11));
   1029
   1030		/* force shared fifo to OUT-only mode */
   1031		if (hw_ep->is_shared_fifo) {
   1032			csr = musb_readw(regs, MUSB_TXCSR);
   1033			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
   1034			musb_writew(regs, MUSB_TXCSR, csr);
   1035		}
   1036
   1037		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
   1038		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
   1039			csr |= MUSB_RXCSR_P_ISO;
   1040		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
   1041			csr |= MUSB_RXCSR_DISNYET;
   1042
   1043		/* set twice in case of double buffering */
   1044		musb_writew(regs, MUSB_RXCSR, csr);
   1045		musb_writew(regs, MUSB_RXCSR, csr);
   1046	}
   1047
   1048	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
   1049	 * for some reason you run out of channels here.
   1050	 */
   1051	if (is_dma_capable() && musb->dma_controller) {
   1052		struct dma_controller	*c = musb->dma_controller;
   1053
   1054		musb_ep->dma = c->channel_alloc(c, hw_ep,
   1055				(desc->bEndpointAddress & USB_DIR_IN));
   1056	} else
   1057		musb_ep->dma = NULL;
   1058
   1059	musb_ep->desc = desc;
   1060	musb_ep->busy = 0;
   1061	musb_ep->wedged = 0;
   1062	status = 0;
   1063
   1064	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
   1065			musb_driver_name, musb_ep->end_point.name,
   1066			musb_ep_xfertype_string(musb_ep->type),
   1067			musb_ep->is_in ? "IN" : "OUT",
   1068			musb_ep->dma ? "dma, " : "",
   1069			musb_ep->packet_sz);
   1070
   1071	schedule_delayed_work(&musb->irq_work, 0);
   1072
   1073fail:
   1074	spin_unlock_irqrestore(&musb->lock, flags);
   1075	return status;
   1076}
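
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * this .enable hook is normally reached through usb_ep_enable(), which
 * passes the descriptor the function driver (or composite core) stored
 * in ep->desc.  A hedged example of such a descriptor for a bulk IN
 * endpoint (the variable name is hypothetical):
 *
 *	static struct usb_endpoint_descriptor my_bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(512),
 *	};
 *
 * usb_endpoint_maxp() and usb_endpoint_maxp_mult() above decode
 * wMaxPacketSize, including the high-bandwidth multiplier bits used for
 * periodic endpoints.
 */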
   1077
   1078/*
    1079 * Disable an endpoint, flushing all queued requests.
   1080 */
   1081static int musb_gadget_disable(struct usb_ep *ep)
   1082{
   1083	unsigned long	flags;
   1084	struct musb	*musb;
   1085	u8		epnum;
   1086	struct musb_ep	*musb_ep;
   1087	void __iomem	*epio;
   1088
   1089	musb_ep = to_musb_ep(ep);
   1090	musb = musb_ep->musb;
   1091	epnum = musb_ep->current_epnum;
   1092	epio = musb->endpoints[epnum].regs;
   1093
   1094	spin_lock_irqsave(&musb->lock, flags);
   1095	musb_ep_select(musb->mregs, epnum);
   1096
   1097	/* zero the endpoint sizes */
   1098	if (musb_ep->is_in) {
   1099		musb->intrtxe &= ~(1 << epnum);
   1100		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
   1101		musb_writew(epio, MUSB_TXMAXP, 0);
   1102	} else {
   1103		musb->intrrxe &= ~(1 << epnum);
   1104		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
   1105		musb_writew(epio, MUSB_RXMAXP, 0);
   1106	}
   1107
   1108	/* abort all pending DMA and requests */
   1109	nuke(musb_ep, -ESHUTDOWN);
   1110
   1111	musb_ep->desc = NULL;
   1112	musb_ep->end_point.desc = NULL;
   1113
   1114	schedule_delayed_work(&musb->irq_work, 0);
   1115
   1116	spin_unlock_irqrestore(&(musb->lock), flags);
   1117
   1118	musb_dbg(musb, "%s", musb_ep->end_point.name);
   1119
   1120	return 0;
   1121}
   1122
   1123/*
   1124 * Allocate a request for an endpoint.
   1125 * Reused by ep0 code.
   1126 */
   1127struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
   1128{
   1129	struct musb_ep		*musb_ep = to_musb_ep(ep);
   1130	struct musb_request	*request = NULL;
   1131
   1132	request = kzalloc(sizeof *request, gfp_flags);
   1133	if (!request)
   1134		return NULL;
   1135
   1136	request->request.dma = DMA_ADDR_INVALID;
   1137	request->epnum = musb_ep->current_epnum;
   1138	request->ep = musb_ep;
   1139
   1140	trace_musb_req_alloc(request);
   1141	return &request->request;
   1142}
   1143
   1144/*
   1145 * Free a request
   1146 * Reused by ep0 code.
   1147 */
   1148void musb_free_request(struct usb_ep *ep, struct usb_request *req)
   1149{
   1150	struct musb_request *request = to_musb_request(req);
   1151
   1152	trace_musb_req_free(request);
   1153	kfree(request);
   1154}
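
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * function drivers reach the allocation and queueing code in this file
 * through the generic endpoint API.  A minimal usage sketch, assuming an
 * already-enabled endpoint "ep", a buffer "buf" of "len" bytes and a
 * completion callback "my_complete" (all hypothetical):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() dispatches to musb_gadget_queue() below via musb_ep_ops,
 * and usb_ep_free_request() releases the request through
 * musb_free_request() above.
 */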
   1155
   1156static LIST_HEAD(buffers);
   1157
   1158struct free_record {
   1159	struct list_head	list;
   1160	struct device		*dev;
   1161	unsigned		bytes;
   1162	dma_addr_t		dma;
   1163};
   1164
   1165/*
   1166 * Context: controller locked, IRQs blocked.
   1167 */
   1168void musb_ep_restart(struct musb *musb, struct musb_request *req)
   1169{
   1170	trace_musb_req_start(req);
   1171	musb_ep_select(musb->mregs, req->epnum);
   1172	if (req->tx)
   1173		txstate(musb, req);
   1174	else
   1175		rxstate(musb, req);
   1176}
   1177
   1178static int musb_ep_restart_resume_work(struct musb *musb, void *data)
   1179{
   1180	struct musb_request *req = data;
   1181
   1182	musb_ep_restart(musb, req);
   1183
   1184	return 0;
   1185}
   1186
   1187static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
   1188			gfp_t gfp_flags)
   1189{
   1190	struct musb_ep		*musb_ep;
   1191	struct musb_request	*request;
   1192	struct musb		*musb;
   1193	int			status;
   1194	unsigned long		lockflags;
   1195
   1196	if (!ep || !req)
   1197		return -EINVAL;
   1198	if (!req->buf)
   1199		return -ENODATA;
   1200
   1201	musb_ep = to_musb_ep(ep);
   1202	musb = musb_ep->musb;
   1203
   1204	request = to_musb_request(req);
   1205	request->musb = musb;
   1206
   1207	if (request->ep != musb_ep)
   1208		return -EINVAL;
   1209
   1210	status = pm_runtime_get(musb->controller);
   1211	if ((status != -EINPROGRESS) && status < 0) {
   1212		dev_err(musb->controller,
   1213			"pm runtime get failed in %s\n",
   1214			__func__);
   1215		pm_runtime_put_noidle(musb->controller);
   1216
   1217		return status;
   1218	}
   1219	status = 0;
   1220
   1221	trace_musb_req_enq(request);
   1222
   1223	/* request is mine now... */
   1224	request->request.actual = 0;
   1225	request->request.status = -EINPROGRESS;
   1226	request->epnum = musb_ep->current_epnum;
   1227	request->tx = musb_ep->is_in;
   1228
   1229	map_dma_buffer(request, musb, musb_ep);
   1230
   1231	spin_lock_irqsave(&musb->lock, lockflags);
   1232
   1233	/* don't queue if the ep is down */
   1234	if (!musb_ep->desc) {
   1235		musb_dbg(musb, "req %p queued to %s while ep %s",
   1236				req, ep->name, "disabled");
   1237		status = -ESHUTDOWN;
   1238		unmap_dma_buffer(request, musb);
   1239		goto unlock;
   1240	}
   1241
   1242	/* add request to the list */
   1243	list_add_tail(&request->list, &musb_ep->req_list);
   1244
    1245	/* if this is the head of the queue, start i/o ... */
   1246	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
   1247		status = musb_queue_resume_work(musb,
   1248						musb_ep_restart_resume_work,
   1249						request);
   1250		if (status < 0) {
   1251			dev_err(musb->controller, "%s resume work: %i\n",
   1252				__func__, status);
   1253			list_del(&request->list);
   1254		}
   1255	}
   1256
   1257unlock:
   1258	spin_unlock_irqrestore(&musb->lock, lockflags);
   1259	pm_runtime_mark_last_busy(musb->controller);
   1260	pm_runtime_put_autosuspend(musb->controller);
   1261
   1262	return status;
   1263}
   1264
   1265static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
   1266{
   1267	struct musb_ep		*musb_ep = to_musb_ep(ep);
   1268	struct musb_request	*req = to_musb_request(request);
   1269	struct musb_request	*r;
   1270	unsigned long		flags;
   1271	int			status = 0;
   1272	struct musb		*musb = musb_ep->musb;
   1273
   1274	if (!ep || !request || req->ep != musb_ep)
   1275		return -EINVAL;
   1276
   1277	trace_musb_req_deq(req);
   1278
   1279	spin_lock_irqsave(&musb->lock, flags);
   1280
   1281	list_for_each_entry(r, &musb_ep->req_list, list) {
   1282		if (r == req)
   1283			break;
   1284	}
   1285	if (r != req) {
   1286		dev_err(musb->controller, "request %p not queued to %s\n",
   1287				request, ep->name);
   1288		status = -EINVAL;
   1289		goto done;
   1290	}
   1291
   1292	/* if the hardware doesn't have the request, easy ... */
   1293	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
   1294		musb_g_giveback(musb_ep, request, -ECONNRESET);
   1295
   1296	/* ... else abort the dma transfer ... */
   1297	else if (is_dma_capable() && musb_ep->dma) {
   1298		struct dma_controller	*c = musb->dma_controller;
   1299
   1300		musb_ep_select(musb->mregs, musb_ep->current_epnum);
   1301		if (c->channel_abort)
   1302			status = c->channel_abort(musb_ep->dma);
   1303		else
   1304			status = -EBUSY;
   1305		if (status == 0)
   1306			musb_g_giveback(musb_ep, request, -ECONNRESET);
   1307	} else {
   1308		/* NOTE: by sticking to easily tested hardware/driver states,
   1309		 * we leave counting of in-flight packets imprecise.
   1310		 */
   1311		musb_g_giveback(musb_ep, request, -ECONNRESET);
   1312	}
   1313
   1314done:
   1315	spin_unlock_irqrestore(&musb->lock, flags);
   1316	return status;
   1317}
   1318
   1319/*
   1320 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
   1321 * data but will queue requests.
   1322 *
   1323 * exported to ep0 code
   1324 */
   1325static int musb_gadget_set_halt(struct usb_ep *ep, int value)
   1326{
   1327	struct musb_ep		*musb_ep = to_musb_ep(ep);
   1328	u8			epnum = musb_ep->current_epnum;
   1329	struct musb		*musb = musb_ep->musb;
   1330	void __iomem		*epio = musb->endpoints[epnum].regs;
   1331	void __iomem		*mbase;
   1332	unsigned long		flags;
   1333	u16			csr;
   1334	struct musb_request	*request;
   1335	int			status = 0;
   1336
   1337	if (!ep)
   1338		return -EINVAL;
   1339	mbase = musb->mregs;
   1340
   1341	spin_lock_irqsave(&musb->lock, flags);
   1342
   1343	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
   1344		status = -EINVAL;
   1345		goto done;
   1346	}
   1347
   1348	musb_ep_select(mbase, epnum);
   1349
   1350	request = next_request(musb_ep);
   1351	if (value) {
   1352		if (request) {
   1353			musb_dbg(musb, "request in progress, cannot halt %s",
   1354			    ep->name);
   1355			status = -EAGAIN;
   1356			goto done;
   1357		}
   1358		/* Cannot portably stall with non-empty FIFO */
   1359		if (musb_ep->is_in) {
   1360			csr = musb_readw(epio, MUSB_TXCSR);
   1361			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
   1362				musb_dbg(musb, "FIFO busy, cannot halt %s",
   1363						ep->name);
   1364				status = -EAGAIN;
   1365				goto done;
   1366			}
   1367		}
   1368	} else
   1369		musb_ep->wedged = 0;
   1370
   1371	/* set/clear the stall and toggle bits */
   1372	musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
   1373	if (musb_ep->is_in) {
   1374		csr = musb_readw(epio, MUSB_TXCSR);
   1375		csr |= MUSB_TXCSR_P_WZC_BITS
   1376			| MUSB_TXCSR_CLRDATATOG;
   1377		if (value)
   1378			csr |= MUSB_TXCSR_P_SENDSTALL;
   1379		else
   1380			csr &= ~(MUSB_TXCSR_P_SENDSTALL
   1381				| MUSB_TXCSR_P_SENTSTALL);
   1382		csr &= ~MUSB_TXCSR_TXPKTRDY;
   1383		musb_writew(epio, MUSB_TXCSR, csr);
   1384	} else {
   1385		csr = musb_readw(epio, MUSB_RXCSR);
   1386		csr |= MUSB_RXCSR_P_WZC_BITS
   1387			| MUSB_RXCSR_FLUSHFIFO
   1388			| MUSB_RXCSR_CLRDATATOG;
   1389		if (value)
   1390			csr |= MUSB_RXCSR_P_SENDSTALL;
   1391		else
   1392			csr &= ~(MUSB_RXCSR_P_SENDSTALL
   1393				| MUSB_RXCSR_P_SENTSTALL);
   1394		musb_writew(epio, MUSB_RXCSR, csr);
   1395	}
   1396
   1397	/* maybe start the first request in the queue */
   1398	if (!musb_ep->busy && !value && request) {
   1399		musb_dbg(musb, "restarting the request");
   1400		musb_ep_restart(musb, request);
   1401	}
   1402
   1403done:
   1404	spin_unlock_irqrestore(&musb->lock, flags);
   1405	return status;
   1406}
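
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * function drivers reach this hook via usb_ep_set_halt() and
 * usb_ep_clear_halt().  A protocol stall on a bulk endpoint might be
 * driven roughly as follows ("ep" is hypothetical):
 *
 *	ret = usb_ep_set_halt(ep);
 *	if (ret == -EAGAIN)
 *		return ret;	// a request is in flight or the TX FIFO
 *				// is not empty (the checks above)
 *
 * The halt is later removed either by usb_ep_clear_halt() or by the host
 * issuing CLEAR_FEATURE(ENDPOINT_HALT), unless the endpoint was wedged
 * (see musb_gadget_set_wedge() below).
 */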
   1407
   1408/*
    1409 * Set the halt feature, with clear-halt requests ignored
   1410 */
   1411static int musb_gadget_set_wedge(struct usb_ep *ep)
   1412{
   1413	struct musb_ep		*musb_ep = to_musb_ep(ep);
   1414
   1415	if (!ep)
   1416		return -EINVAL;
   1417
   1418	musb_ep->wedged = 1;
   1419
   1420	return usb_ep_set_halt(ep);
   1421}
   1422
   1423static int musb_gadget_fifo_status(struct usb_ep *ep)
   1424{
   1425	struct musb_ep		*musb_ep = to_musb_ep(ep);
   1426	void __iomem		*epio = musb_ep->hw_ep->regs;
   1427	int			retval = -EINVAL;
   1428
   1429	if (musb_ep->desc && !musb_ep->is_in) {
   1430		struct musb		*musb = musb_ep->musb;
   1431		int			epnum = musb_ep->current_epnum;
   1432		void __iomem		*mbase = musb->mregs;
   1433		unsigned long		flags;
   1434
   1435		spin_lock_irqsave(&musb->lock, flags);
   1436
   1437		musb_ep_select(mbase, epnum);
   1438		/* FIXME return zero unless RXPKTRDY is set */
   1439		retval = musb_readw(epio, MUSB_RXCOUNT);
   1440
   1441		spin_unlock_irqrestore(&musb->lock, flags);
   1442	}
   1443	return retval;
   1444}
   1445
   1446static void musb_gadget_fifo_flush(struct usb_ep *ep)
   1447{
   1448	struct musb_ep	*musb_ep = to_musb_ep(ep);
   1449	struct musb	*musb = musb_ep->musb;
   1450	u8		epnum = musb_ep->current_epnum;
   1451	void __iomem	*epio = musb->endpoints[epnum].regs;
   1452	void __iomem	*mbase;
   1453	unsigned long	flags;
   1454	u16		csr;
   1455
   1456	mbase = musb->mregs;
   1457
   1458	spin_lock_irqsave(&musb->lock, flags);
   1459	musb_ep_select(mbase, (u8) epnum);
   1460
   1461	/* disable interrupts */
   1462	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
   1463
   1464	if (musb_ep->is_in) {
   1465		csr = musb_readw(epio, MUSB_TXCSR);
   1466		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
   1467			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
   1468			/*
    1469			 * Setting both TXPKTRDY and FLUSHFIFO makes the
    1470			 * controller interrupt the current FIFO loading,
    1471			 * but not flush the already loaded ones.
   1472			 */
   1473			csr &= ~MUSB_TXCSR_TXPKTRDY;
   1474			musb_writew(epio, MUSB_TXCSR, csr);
   1475			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
   1476			musb_writew(epio, MUSB_TXCSR, csr);
   1477		}
   1478	} else {
   1479		csr = musb_readw(epio, MUSB_RXCSR);
   1480		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
   1481		musb_writew(epio, MUSB_RXCSR, csr);
   1482		musb_writew(epio, MUSB_RXCSR, csr);
   1483	}
   1484
   1485	/* re-enable interrupt */
   1486	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
   1487	spin_unlock_irqrestore(&musb->lock, flags);
   1488}
   1489
   1490static const struct usb_ep_ops musb_ep_ops = {
   1491	.enable		= musb_gadget_enable,
   1492	.disable	= musb_gadget_disable,
   1493	.alloc_request	= musb_alloc_request,
   1494	.free_request	= musb_free_request,
   1495	.queue		= musb_gadget_queue,
   1496	.dequeue	= musb_gadget_dequeue,
   1497	.set_halt	= musb_gadget_set_halt,
   1498	.set_wedge	= musb_gadget_set_wedge,
   1499	.fifo_status	= musb_gadget_fifo_status,
   1500	.fifo_flush	= musb_gadget_fifo_flush
   1501};
   1502
   1503/* ----------------------------------------------------------------------- */
   1504
   1505static int musb_gadget_get_frame(struct usb_gadget *gadget)
   1506{
   1507	struct musb	*musb = gadget_to_musb(gadget);
   1508
   1509	return (int)musb_readw(musb->mregs, MUSB_FRAME);
   1510}
   1511
   1512static int musb_gadget_wakeup(struct usb_gadget *gadget)
   1513{
   1514	struct musb	*musb = gadget_to_musb(gadget);
   1515	void __iomem	*mregs = musb->mregs;
   1516	unsigned long	flags;
   1517	int		status = -EINVAL;
   1518	u8		power, devctl;
   1519	int		retries;
   1520
   1521	spin_lock_irqsave(&musb->lock, flags);
   1522
   1523	switch (musb->xceiv->otg->state) {
   1524	case OTG_STATE_B_PERIPHERAL:
   1525		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
   1526		 * that's part of the standard usb 1.1 state machine, and
   1527		 * doesn't affect OTG transitions.
   1528		 */
   1529		if (musb->may_wakeup && musb->is_suspended)
   1530			break;
   1531		goto done;
   1532	case OTG_STATE_B_IDLE:
   1533		/* Start SRP ... OTG not required. */
   1534		devctl = musb_readb(mregs, MUSB_DEVCTL);
   1535		musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
   1536		devctl |= MUSB_DEVCTL_SESSION;
   1537		musb_writeb(mregs, MUSB_DEVCTL, devctl);
   1538		devctl = musb_readb(mregs, MUSB_DEVCTL);
   1539		retries = 100;
   1540		while (!(devctl & MUSB_DEVCTL_SESSION)) {
   1541			devctl = musb_readb(mregs, MUSB_DEVCTL);
   1542			if (retries-- < 1)
   1543				break;
   1544		}
   1545		retries = 10000;
   1546		while (devctl & MUSB_DEVCTL_SESSION) {
   1547			devctl = musb_readb(mregs, MUSB_DEVCTL);
   1548			if (retries-- < 1)
   1549				break;
   1550		}
   1551
   1552		spin_unlock_irqrestore(&musb->lock, flags);
   1553		otg_start_srp(musb->xceiv->otg);
   1554		spin_lock_irqsave(&musb->lock, flags);
   1555
   1556		/* Block idling for at least 1s */
   1557		musb_platform_try_idle(musb,
   1558			jiffies + msecs_to_jiffies(1 * HZ));
   1559
   1560		status = 0;
   1561		goto done;
   1562	default:
   1563		musb_dbg(musb, "Unhandled wake: %s",
   1564			usb_otg_state_string(musb->xceiv->otg->state));
   1565		goto done;
   1566	}
   1567
   1568	status = 0;
   1569
   1570	power = musb_readb(mregs, MUSB_POWER);
   1571	power |= MUSB_POWER_RESUME;
   1572	musb_writeb(mregs, MUSB_POWER, power);
   1573	musb_dbg(musb, "issue wakeup");
   1574
   1575	/* FIXME do this next chunk in a timer callback, no udelay */
   1576	mdelay(2);
   1577
   1578	power = musb_readb(mregs, MUSB_POWER);
   1579	power &= ~MUSB_POWER_RESUME;
   1580	musb_writeb(mregs, MUSB_POWER, power);
   1581done:
   1582	spin_unlock_irqrestore(&musb->lock, flags);
   1583	return status;
   1584}
   1585
   1586static int
   1587musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
   1588{
   1589	gadget->is_selfpowered = !!is_selfpowered;
   1590	return 0;
   1591}
   1592
   1593static void musb_pullup(struct musb *musb, int is_on)
   1594{
   1595	u8 power;
   1596
   1597	power = musb_readb(musb->mregs, MUSB_POWER);
   1598	if (is_on)
   1599		power |= MUSB_POWER_SOFTCONN;
   1600	else
   1601		power &= ~MUSB_POWER_SOFTCONN;
   1602
   1603	/* FIXME if on, HdrcStart; if off, HdrcStop */
   1604
   1605	musb_dbg(musb, "gadget D+ pullup %s",
   1606		is_on ? "on" : "off");
   1607	musb_writeb(musb->mregs, MUSB_POWER, power);
   1608}
   1609
   1610#if 0
   1611static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
   1612{
   1613	musb_dbg(musb, "<= %s =>\n", __func__);
   1614
   1615	/*
   1616	 * FIXME iff driver's softconnect flag is set (as it is during probe,
   1617	 * though that can clear it), just musb_pullup().
   1618	 */
   1619
   1620	return -EINVAL;
   1621}
   1622#endif
   1623
   1624static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
   1625{
   1626	struct musb	*musb = gadget_to_musb(gadget);
   1627
   1628	if (!musb->xceiv->set_power)
   1629		return -EOPNOTSUPP;
   1630	return usb_phy_set_power(musb->xceiv, mA);
   1631}
   1632
   1633static void musb_gadget_work(struct work_struct *work)
   1634{
   1635	struct musb *musb;
   1636	unsigned long flags;
   1637
   1638	musb = container_of(work, struct musb, gadget_work.work);
   1639	pm_runtime_get_sync(musb->controller);
   1640	spin_lock_irqsave(&musb->lock, flags);
   1641	musb_pullup(musb, musb->softconnect);
   1642	spin_unlock_irqrestore(&musb->lock, flags);
   1643	pm_runtime_mark_last_busy(musb->controller);
   1644	pm_runtime_put_autosuspend(musb->controller);
   1645}
   1646
   1647static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
   1648{
   1649	struct musb	*musb = gadget_to_musb(gadget);
   1650	unsigned long	flags;
   1651
   1652	is_on = !!is_on;
   1653
   1654	/* NOTE: this assumes we are sensing vbus; we'd rather
   1655	 * not pullup unless the B-session is active.
   1656	 */
   1657	spin_lock_irqsave(&musb->lock, flags);
   1658	if (is_on != musb->softconnect) {
   1659		musb->softconnect = is_on;
   1660		schedule_delayed_work(&musb->gadget_work, 0);
   1661	}
   1662	spin_unlock_irqrestore(&musb->lock, flags);
   1663
   1664	return 0;
   1665}
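
/*
 * Editor's note (illustrative, not part of the original driver): the UDC
 * core typically invokes this .pullup hook from usb_gadget_connect() and
 * usb_gadget_disconnect(), for example when a function driver binds or
 * when userspace toggles the gadget's soft_connect attribute:
 *
 *	usb_gadget_connect(gadget);	// typically ends up here, is_on == 1
 *	usb_gadget_disconnect(gadget);	// typically ends up here, is_on == 0
 *
 * The actual D+ pull-up write is deferred to musb_gadget_work() above.
 */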
   1666
   1667static int musb_gadget_start(struct usb_gadget *g,
   1668		struct usb_gadget_driver *driver);
   1669static int musb_gadget_stop(struct usb_gadget *g);
   1670
   1671static const struct usb_gadget_ops musb_gadget_operations = {
   1672	.get_frame		= musb_gadget_get_frame,
   1673	.wakeup			= musb_gadget_wakeup,
   1674	.set_selfpowered	= musb_gadget_set_self_powered,
   1675	/* .vbus_session		= musb_gadget_vbus_session, */
   1676	.vbus_draw		= musb_gadget_vbus_draw,
   1677	.pullup			= musb_gadget_pullup,
   1678	.udc_start		= musb_gadget_start,
   1679	.udc_stop		= musb_gadget_stop,
   1680};
   1681
   1682/* ----------------------------------------------------------------------- */
   1683
   1684/* Registration */
   1685
   1686/* Only this registration code "knows" the rule (from USB standards)
   1687 * about there being only one external upstream port.  It assumes
   1688 * all peripheral ports are external...
   1689 */
   1690
   1691static void
   1692init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
   1693{
   1694	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
   1695
   1696	memset(ep, 0, sizeof *ep);
   1697
   1698	ep->current_epnum = epnum;
   1699	ep->musb = musb;
   1700	ep->hw_ep = hw_ep;
   1701	ep->is_in = is_in;
   1702
   1703	INIT_LIST_HEAD(&ep->req_list);
   1704
   1705	sprintf(ep->name, "ep%d%s", epnum,
   1706			(!epnum || hw_ep->is_shared_fifo) ? "" : (
   1707				is_in ? "in" : "out"));
   1708	ep->end_point.name = ep->name;
   1709	INIT_LIST_HEAD(&ep->end_point.ep_list);
   1710	if (!epnum) {
   1711		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
   1712		ep->end_point.caps.type_control = true;
   1713		ep->end_point.ops = &musb_g_ep0_ops;
   1714		musb->g.ep0 = &ep->end_point;
   1715	} else {
   1716		if (is_in)
   1717			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
   1718		else
   1719			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
   1720		ep->end_point.caps.type_iso = true;
   1721		ep->end_point.caps.type_bulk = true;
   1722		ep->end_point.caps.type_int = true;
   1723		ep->end_point.ops = &musb_ep_ops;
   1724		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
   1725	}
   1726
   1727	if (!epnum || hw_ep->is_shared_fifo) {
   1728		ep->end_point.caps.dir_in = true;
   1729		ep->end_point.caps.dir_out = true;
   1730	} else if (is_in)
   1731		ep->end_point.caps.dir_in = true;
   1732	else
   1733		ep->end_point.caps.dir_out = true;
   1734}
   1735
   1736/*
   1737 * Initialize the endpoints exposed to peripheral drivers, with backlinks
   1738 * to the rest of the driver state.
   1739 */
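        /*
         * A hw endpoint with a shared FIFO yields a single bidirectional
         * usb_ep (registered via ep_in); otherwise ep_in and/or ep_out are
         * registered only when the matching FIFO size is nonzero.
         */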
   1740static inline void musb_g_init_endpoints(struct musb *musb)
   1741{
   1742	u8			epnum;
   1743	struct musb_hw_ep	*hw_ep;
   1744	unsigned		count = 0;
   1745
   1746	/* initialize endpoint list just once */
   1747	INIT_LIST_HEAD(&(musb->g.ep_list));
   1748
   1749	for (epnum = 0, hw_ep = musb->endpoints;
   1750			epnum < musb->nr_endpoints;
   1751			epnum++, hw_ep++) {
   1752		if (hw_ep->is_shared_fifo /* || !epnum */) {
   1753			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
   1754			count++;
   1755		} else {
   1756			if (hw_ep->max_packet_sz_tx) {
   1757				init_peripheral_ep(musb, &hw_ep->ep_in,
   1758							epnum, 1);
   1759				count++;
   1760			}
   1761			if (hw_ep->max_packet_sz_rx) {
   1762				init_peripheral_ep(musb, &hw_ep->ep_out,
   1763							epnum, 0);
   1764				count++;
   1765			}
   1766		}
   1767	}
   1768}
   1769
   1770/* called once during driver setup to initialize and link into
   1771 * the driver model; memory is zeroed.
   1772 */
   1773int musb_gadget_setup(struct musb *musb)
   1774{
   1775	int status;
   1776
   1777	/* REVISIT minor race:  if (erroneously) setting up two
   1778	 * musb peripherals at the same time, only the bus lock
   1779	 * is probably held.
   1780	 */
   1781
   1782	musb->g.ops = &musb_gadget_operations;
   1783	musb->g.max_speed = USB_SPEED_HIGH;
   1784	musb->g.speed = USB_SPEED_UNKNOWN;
   1785
   1786	MUSB_DEV_MODE(musb);
   1787	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
   1788
   1789	/* this "gadget" abstracts/virtualizes the controller */
   1790	musb->g.name = musb_driver_name;
   1791	/* don't support otg protocols */
   1792	musb->g.is_otg = 0;
   1793	INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
   1794	musb_g_init_endpoints(musb);
   1795
   1796	musb->is_active = 0;
   1797	musb_platform_try_idle(musb, 0);
   1798
   1799	status = usb_add_gadget_udc(musb->controller, &musb->g);
   1800	if (status)
   1801		goto err;
   1802
   1803	return 0;
   1804err:
   1805	musb->g.dev.parent = NULL;
   1806	device_unregister(&musb->g.dev);
   1807	return status;
   1808}
   1809
   1810void musb_gadget_cleanup(struct musb *musb)
   1811{
   1812	if (musb->port_mode == MUSB_HOST)
   1813		return;
   1814
   1815	cancel_delayed_work_sync(&musb->gadget_work);
   1816	usb_del_gadget_udc(&musb->g);
   1817}
   1818
   1819/*
   1820 * Register the gadget driver. Used by gadget drivers when
   1821 * registering themselves with the controller.
   1822 *
    1823 * -EINVAL the gadget driver does not support high speed
   1824 * -EBUSY another gadget is already using the controller
   1825 * -ENOMEM no memory to perform the operation
   1826 *
   1827 * @param driver the gadget driver
   1828 * @return <0 if error, 0 if everything is fine
   1829 */
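        /*
         * Holds a runtime PM reference while wiring the gadget into the OTG
         * transceiver and starting the controller; VBUS is only driven here
         * if the PHY's last event was USB_EVENT_ID.
         */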
   1830static int musb_gadget_start(struct usb_gadget *g,
   1831		struct usb_gadget_driver *driver)
   1832{
   1833	struct musb		*musb = gadget_to_musb(g);
   1834	struct usb_otg		*otg = musb->xceiv->otg;
   1835	unsigned long		flags;
   1836	int			retval = 0;
   1837
   1838	if (driver->max_speed < USB_SPEED_HIGH) {
   1839		retval = -EINVAL;
   1840		goto err;
   1841	}
   1842
   1843	pm_runtime_get_sync(musb->controller);
   1844
   1845	musb->softconnect = 0;
   1846	musb->gadget_driver = driver;
   1847
   1848	spin_lock_irqsave(&musb->lock, flags);
   1849	musb->is_active = 1;
   1850
   1851	otg_set_peripheral(otg, &musb->g);
   1852	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
   1853	spin_unlock_irqrestore(&musb->lock, flags);
   1854
   1855	musb_start(musb);
   1856
   1857	/* REVISIT:  funcall to other code, which also
   1858	 * handles power budgeting ... this way also
   1859	 * ensures HdrcStart is indirectly called.
   1860	 */
   1861	if (musb->xceiv->last_event == USB_EVENT_ID)
   1862		musb_platform_set_vbus(musb, 1);
   1863
   1864	pm_runtime_mark_last_busy(musb->controller);
   1865	pm_runtime_put_autosuspend(musb->controller);
   1866
   1867	return 0;
   1868
   1869err:
   1870	return retval;
   1871}
   1872
   1873/*
   1874 * Unregister the gadget driver. Used by gadget drivers when
   1875 * unregistering themselves from the controller.
   1876 *
   1877 * @param driver the gadget driver to unregister
   1878 */
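        /*
         * Teardown mirrors musb_gadget_start(): stop HNP, drop the VBUS
         * draw, detach from the OTG transceiver, and kick irq_work so the
         * runtime PM code re-reads DEVCTL.
         */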
   1879static int musb_gadget_stop(struct usb_gadget *g)
   1880{
   1881	struct musb	*musb = gadget_to_musb(g);
   1882	unsigned long	flags;
   1883
   1884	pm_runtime_get_sync(musb->controller);
   1885
   1886	/*
   1887	 * REVISIT always use otg_set_peripheral() here too;
   1888	 * this needs to shut down the OTG engine.
   1889	 */
   1890
   1891	spin_lock_irqsave(&musb->lock, flags);
   1892
   1893	musb_hnp_stop(musb);
   1894
   1895	(void) musb_gadget_vbus_draw(&musb->g, 0);
   1896
   1897	musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
   1898	musb_stop(musb);
   1899	otg_set_peripheral(musb->xceiv->otg, NULL);
   1900
   1901	musb->is_active = 0;
   1902	musb->gadget_driver = NULL;
   1903	musb_platform_try_idle(musb, 0);
   1904	spin_unlock_irqrestore(&musb->lock, flags);
   1905
   1906	/*
   1907	 * FIXME we need to be able to register another
   1908	 * gadget driver here and have everything work;
   1909	 * that currently misbehaves.
   1910	 */
   1911
   1912	/* Force check of devctl register for PM runtime */
   1913	schedule_delayed_work(&musb->irq_work, 0);
   1914
   1915	pm_runtime_mark_last_busy(musb->controller);
   1916	pm_runtime_put_autosuspend(musb->controller);
   1917
   1918	return 0;
   1919}
   1920
   1921/* ----------------------------------------------------------------------- */
   1922
    1923/* lifecycle operations called through musb_core.c */
   1924
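        /*
         * The resume/suspend/disconnect notifications below invoke the
         * gadget driver callbacks with musb->lock temporarily dropped.
         */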
   1925void musb_g_resume(struct musb *musb)
   1926{
   1927	musb->is_suspended = 0;
   1928	switch (musb->xceiv->otg->state) {
   1929	case OTG_STATE_B_IDLE:
   1930		break;
   1931	case OTG_STATE_B_WAIT_ACON:
   1932	case OTG_STATE_B_PERIPHERAL:
   1933		musb->is_active = 1;
   1934		if (musb->gadget_driver && musb->gadget_driver->resume) {
   1935			spin_unlock(&musb->lock);
   1936			musb->gadget_driver->resume(&musb->g);
   1937			spin_lock(&musb->lock);
   1938		}
   1939		break;
   1940	default:
   1941		WARNING("unhandled RESUME transition (%s)\n",
   1942				usb_otg_state_string(musb->xceiv->otg->state));
   1943	}
   1944}
   1945
   1946/* called when SOF packets stop for 3+ msec */
   1947void musb_g_suspend(struct musb *musb)
   1948{
   1949	u8	devctl;
   1950
   1951	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   1952	musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
   1953
   1954	switch (musb->xceiv->otg->state) {
   1955	case OTG_STATE_B_IDLE:
   1956		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
   1957			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
   1958		break;
   1959	case OTG_STATE_B_PERIPHERAL:
   1960		musb->is_suspended = 1;
   1961		if (musb->gadget_driver && musb->gadget_driver->suspend) {
   1962			spin_unlock(&musb->lock);
   1963			musb->gadget_driver->suspend(&musb->g);
   1964			spin_lock(&musb->lock);
   1965		}
   1966		break;
   1967	default:
   1968		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
   1969		 * A_PERIPHERAL may need care too
   1970		 */
   1971		WARNING("unhandled SUSPEND transition (%s)",
   1972				usb_otg_state_string(musb->xceiv->otg->state));
   1973	}
   1974}
   1975
   1976/* Called during SRP */
   1977void musb_g_wakeup(struct musb *musb)
   1978{
   1979	musb_gadget_wakeup(&musb->g);
   1980}
   1981
   1982/* called when VBUS drops below session threshold, and in other cases */
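        /*
         * Clears the host-request bit, stops drawing VBUS, notifies the
         * gadget driver, then falls back to an idle OTG state.
         */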
   1983void musb_g_disconnect(struct musb *musb)
   1984{
   1985	void __iomem	*mregs = musb->mregs;
   1986	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
   1987
   1988	musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
   1989
   1990	/* clear HR */
   1991	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
   1992
   1993	/* don't draw vbus until new b-default session */
   1994	(void) musb_gadget_vbus_draw(&musb->g, 0);
   1995
   1996	musb->g.speed = USB_SPEED_UNKNOWN;
   1997	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
   1998		spin_unlock(&musb->lock);
   1999		musb->gadget_driver->disconnect(&musb->g);
   2000		spin_lock(&musb->lock);
   2001	}
   2002
   2003	switch (musb->xceiv->otg->state) {
   2004	default:
   2005		musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
   2006			usb_otg_state_string(musb->xceiv->otg->state));
   2007		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
   2008		MUSB_HST_MODE(musb);
   2009		break;
   2010	case OTG_STATE_A_PERIPHERAL:
   2011		musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
   2012		MUSB_HST_MODE(musb);
   2013		break;
   2014	case OTG_STATE_B_WAIT_ACON:
   2015	case OTG_STATE_B_HOST:
   2016	case OTG_STATE_B_PERIPHERAL:
   2017	case OTG_STATE_B_IDLE:
   2018		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
   2019		break;
   2020	case OTG_STATE_B_SRP_INIT:
   2021		break;
   2022	}
   2023
   2024	musb->is_active = 0;
   2025}
   2026
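        /*
         * Bus reset: notify the gadget core, latch the negotiated speed from
         * POWER.HSMODE, reset address/ep0 state, and pick the OTG state from
         * DEVCTL (or force B_PERIPHERAL on non-OTG controllers).
         */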
   2027void musb_g_reset(struct musb *musb)
   2028__releases(musb->lock)
   2029__acquires(musb->lock)
   2030{
   2031	void __iomem	*mbase = musb->mregs;
   2032	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
   2033	u8		power;
   2034
   2035	musb_dbg(musb, "<== %s driver '%s'",
   2036			(devctl & MUSB_DEVCTL_BDEVICE)
   2037				? "B-Device" : "A-Device",
   2038			musb->gadget_driver
   2039				? musb->gadget_driver->driver.name
   2040				: NULL
   2041			);
   2042
   2043	/* report reset, if we didn't already (flushing EP state) */
   2044	if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
   2045		spin_unlock(&musb->lock);
   2046		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
   2047		spin_lock(&musb->lock);
   2048	}
   2049
   2050	/* clear HR */
   2051	else if (devctl & MUSB_DEVCTL_HR)
   2052		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
   2053
   2054
   2055	/* what speed did we negotiate? */
   2056	power = musb_readb(mbase, MUSB_POWER);
   2057	musb->g.speed = (power & MUSB_POWER_HSMODE)
   2058			? USB_SPEED_HIGH : USB_SPEED_FULL;
   2059
   2060	/* start in USB_STATE_DEFAULT */
   2061	musb->is_active = 1;
   2062	musb->is_suspended = 0;
   2063	MUSB_DEV_MODE(musb);
   2064	musb->address = 0;
   2065	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
   2066
   2067	musb->may_wakeup = 0;
   2068	musb->g.b_hnp_enable = 0;
   2069	musb->g.a_alt_hnp_support = 0;
   2070	musb->g.a_hnp_support = 0;
   2071	musb->g.quirk_zlp_not_supp = 1;
   2072
   2073	/* Normal reset, as B-Device;
   2074	 * or else after HNP, as A-Device
   2075	 */
   2076	if (!musb->g.is_otg) {
   2077		/* USB device controllers that are not OTG compatible
   2078		 * may not have DEVCTL register in silicon.
   2079		 * In that case, do not rely on devctl for setting
   2080		 * peripheral mode.
   2081		 */
   2082		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
   2083		musb->g.is_a_peripheral = 0;
   2084	} else if (devctl & MUSB_DEVCTL_BDEVICE) {
   2085		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
   2086		musb->g.is_a_peripheral = 0;
   2087	} else {
   2088		musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
   2089		musb->g.is_a_peripheral = 1;
   2090	}
   2091
   2092	/* start with default limits on VBUS power draw */
   2093	(void) musb_gadget_vbus_draw(&musb->g, 8);
   2094}