cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gadget.c (149887B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
      4 *		http://www.samsung.com
      5 *
      6 * Copyright 2008 Openmoko, Inc.
      7 * Copyright 2008 Simtec Electronics
      8 *      Ben Dooks <ben@simtec.co.uk>
      9 *      http://armlinux.simtec.co.uk/
     10 *
      11 * S3C USB2.0 High-speed / OTG driver
     12 */
     13
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/spinlock.h>
     17#include <linux/interrupt.h>
     18#include <linux/platform_device.h>
     19#include <linux/dma-mapping.h>
     20#include <linux/mutex.h>
     21#include <linux/seq_file.h>
     22#include <linux/delay.h>
     23#include <linux/io.h>
     24#include <linux/slab.h>
     25#include <linux/of_platform.h>
     26
     27#include <linux/usb/ch9.h>
     28#include <linux/usb/gadget.h>
     29#include <linux/usb/phy.h>
     30#include <linux/usb/composite.h>
     31
     32
     33#include "core.h"
     34#include "hw.h"
     35
     36/* conversion functions */
     37static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
     38{
     39	return container_of(req, struct dwc2_hsotg_req, req);
     40}
     41
     42static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
     43{
     44	return container_of(ep, struct dwc2_hsotg_ep, ep);
     45}
     46
     47static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
     48{
     49	return container_of(gadget, struct dwc2_hsotg, gadget);
     50}
     51
     52static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
     53{
     54	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
     55}
     56
     57static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
     58{
     59	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
     60}
     61
     62static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
     63						u32 ep_index, u32 dir_in)
     64{
     65	if (dir_in)
     66		return hsotg->eps_in[ep_index];
     67	else
     68		return hsotg->eps_out[ep_index];
     69}
     70
     71/* forward declaration of functions */
     72static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
     73
     74/**
     75 * using_dma - return the DMA status of the driver.
     76 * @hsotg: The driver state.
     77 *
     78 * Return true if we're using DMA.
     79 *
     80 * Currently, we have the DMA support code worked into everywhere
     81 * that needs it, but the AMBA DMA implementation in the hardware can
     82 * only DMA from 32bit aligned addresses. This means that gadgets such
     83 * as the CDC Ethernet cannot work as they often pass packets which are
     84 * not 32bit aligned.
     85 *
     86 * Unfortunately the choice to use DMA or not is global to the controller
     87 * and seems to be only settable when the controller is being put through
     88 * a core reset. This means we either need to fix the gadgets to take
     89 * account of DMA alignment, or add bounce buffers (yuerk).
     90 *
      91 * g_using_dma is set depending on the devicetree (dts) flag.
     92 */
     93static inline bool using_dma(struct dwc2_hsotg *hsotg)
     94{
     95	return hsotg->params.g_dma;
     96}
     97
     98/*
     99 * using_desc_dma - return the descriptor DMA status of the driver.
    100 * @hsotg: The driver state.
    101 *
    102 * Return true if we're using descriptor DMA.
    103 */
    104static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
    105{
    106	return hsotg->params.g_dma_desc;
    107}
    108
    109/**
    110 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
    111 * @hs_ep: The endpoint
    112 *
    113 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
    114 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
    115 */
    116static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
    117{
    118	struct dwc2_hsotg *hsotg = hs_ep->parent;
    119	u16 limit = DSTS_SOFFN_LIMIT;
    120
    121	if (hsotg->gadget.speed != USB_SPEED_HIGH)
    122		limit >>= 3;
    123
    124	hs_ep->target_frame += hs_ep->interval;
    125	if (hs_ep->target_frame > limit) {
    126		hs_ep->frame_overrun = true;
    127		hs_ep->target_frame &= limit;
    128	} else {
    129		hs_ep->frame_overrun = false;
    130	}
    131}
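
/*
 * A worked example of the wrap-around above, assuming the high-speed
 * DSTS_SOFFN_LIMIT of 0x3fff: target_frame = 0x3ffe with interval = 8
 * gives 0x3ffe + 8 = 0x4006 > 0x3fff, so frame_overrun is set and
 * target_frame becomes 0x4006 & 0x3fff = 0x0006. The helper below is a
 * hypothetical, self-contained sketch of the same arithmetic; it is not
 * used by the driver.
 */
static inline u16 dwc2_example_next_frame(u16 target, u16 interval, u16 limit)
{
	/* widen to avoid wrapping the intermediate sum in 16 bits */
	u32 next = (u32)target + interval;

	return next > limit ? next & limit : next;
}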
    132
    133/**
    134 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
    135 *                                    by one.
    136 * @hs_ep: The endpoint.
    137 *
     138 * This function is used in the service interval based scheduling flow to
     139 * calculate the descriptor frame number field value. In service interval
     140 * mode the frame number in a descriptor should point to the last (u)frame
     141 * in the interval.
    142 */
    143static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
    144{
    145	struct dwc2_hsotg *hsotg = hs_ep->parent;
    146	u16 limit = DSTS_SOFFN_LIMIT;
    147
    148	if (hsotg->gadget.speed != USB_SPEED_HIGH)
    149		limit >>= 3;
    150
    151	if (hs_ep->target_frame)
    152		hs_ep->target_frame -= 1;
    153	else
    154		hs_ep->target_frame = limit;
    155}
    156
    157/**
    158 * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
    159 * @hsotg: The device state
    160 * @ints: A bitmask of the interrupts to enable
    161 */
    162static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
    163{
    164	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
    165	u32 new_gsintmsk;
    166
    167	new_gsintmsk = gsintmsk | ints;
    168
    169	if (new_gsintmsk != gsintmsk) {
    170		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
    171		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
    172	}
    173}
    174
    175/**
    176 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
    177 * @hsotg: The device state
     178 * @ints: A bitmask of the interrupts to disable
    179 */
    180static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
    181{
    182	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
    183	u32 new_gsintmsk;
    184
    185	new_gsintmsk = gsintmsk & ~ints;
    186
    187	if (new_gsintmsk != gsintmsk)
    188		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
    189}
    190
    191/**
    192 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
    193 * @hsotg: The device state
    194 * @ep: The endpoint index
    195 * @dir_in: True if direction is in.
    196 * @en: The enable value, true to enable
    197 *
    198 * Set or clear the mask for an individual endpoint's interrupt
    199 * request.
    200 */
    201static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
    202				  unsigned int ep, unsigned int dir_in,
    203				 unsigned int en)
    204{
    205	unsigned long flags;
    206	u32 bit = 1 << ep;
    207	u32 daint;
    208
    209	if (!dir_in)
    210		bit <<= 16;
    211
    212	local_irq_save(flags);
    213	daint = dwc2_readl(hsotg, DAINTMSK);
    214	if (en)
    215		daint |= bit;
    216	else
    217		daint &= ~bit;
    218	dwc2_writel(hsotg, daint, DAINTMSK);
    219	local_irq_restore(flags);
    220}
    221
    222/**
    223 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
    224 *
    225 * @hsotg: Programming view of the DWC_otg controller
    226 */
    227int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
    228{
    229	if (hsotg->hw_params.en_multiple_tx_fifo)
    230		/* In dedicated FIFO mode we need count of IN EPs */
    231		return hsotg->hw_params.num_dev_in_eps;
    232	else
    233		/* In shared FIFO mode we need count of Periodic IN EPs */
    234		return hsotg->hw_params.num_dev_perio_in_ep;
    235}
    236
    237/**
    238 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
    239 * device mode TX FIFOs
    240 *
    241 * @hsotg: Programming view of the DWC_otg controller
    242 */
    243int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
    244{
    245	int addr;
    246	int tx_addr_max;
    247	u32 np_tx_fifo_size;
    248
    249	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
    250				hsotg->params.g_np_tx_fifo_size);
    251
    252	/* Get Endpoint Info Control block size in DWORDs. */
    253	tx_addr_max = hsotg->hw_params.total_fifo_size;
    254
    255	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
    256	if (tx_addr_max <= addr)
    257		return 0;
    258
    259	return tx_addr_max - addr;
    260}
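
/*
 * A worked example with hypothetical sizes: if total_fifo_size is 0x1f80
 * (8064) words, g_rx_fifo_size is 2048 words and the non-periodic TX FIFO
 * is capped at 1024 words, the depth left for the dedicated TX FIFOs is
 * 8064 - (2048 + 1024) = 4992 words.
 */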
    261
    262/**
    263 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
    264 *
    265 * @hsotg: Programming view of the DWC_otg controller
    266 *
    267 */
    268static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
    269{
    270	u32 gintsts2;
    271	u32 gintmsk2;
    272
    273	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
    274	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
    275	gintsts2 &= gintmsk2;
    276
    277	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
    278		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
    279		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
    280		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
    281	}
    282}
    283
    284/**
    285 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
    286 * TX FIFOs
    287 *
    288 * @hsotg: Programming view of the DWC_otg controller
    289 */
    290int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
    291{
    292	int tx_fifo_count;
    293	int tx_fifo_depth;
    294
    295	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
    296
    297	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
    298
    299	if (!tx_fifo_count)
    300		return tx_fifo_depth;
    301	else
    302		return tx_fifo_depth / tx_fifo_count;
    303}
    304
    305/**
    306 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
    307 * @hsotg: The device instance.
    308 */
    309static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
    310{
    311	unsigned int ep;
    312	unsigned int addr;
    313	int timeout;
    314
    315	u32 val;
    316	u32 *txfsz = hsotg->params.g_tx_fifo_size;
    317
    318	/* Reset fifo map if not correctly cleared during previous session */
    319	WARN_ON(hsotg->fifo_map);
    320	hsotg->fifo_map = 0;
    321
    322	/* set RX/NPTX FIFO sizes */
    323	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
    324	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
    325		    FIFOSIZE_STARTADDR_SHIFT) |
    326		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
    327		    GNPTXFSIZ);
    328
    329	/*
     330	 * arrange all the rest of the TX FIFOs, as some versions of this
    331	 * block have overlapping default addresses. This also ensures
    332	 * that if the settings have been changed, then they are set to
    333	 * known values.
    334	 */
    335
    336	/* start at the end of the GNPTXFSIZ, rounded up */
    337	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
    338
    339	/*
    340	 * Configure fifos sizes from provided configuration and assign
    341	 * them to endpoints dynamically according to maxpacket size value of
    342	 * given endpoint.
    343	 */
    344	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
    345		if (!txfsz[ep])
    346			continue;
    347		val = addr;
    348		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
    349		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
    350			  "insufficient fifo memory");
    351		addr += txfsz[ep];
    352
    353		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
    354		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
    355	}
    356
    357	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
    358		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
    359		    GDFIFOCFG);
    360	/*
    361	 * according to p428 of the design guide, we need to ensure that
    362	 * all fifos are flushed before continuing
    363	 */
    364
    365	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
    366	       GRSTCTL_RXFFLSH, GRSTCTL);
    367
    368	/* wait until the fifos are both flushed */
    369	timeout = 100;
    370	while (1) {
    371		val = dwc2_readl(hsotg, GRSTCTL);
    372
    373		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
    374			break;
    375
    376		if (--timeout == 0) {
    377			dev_err(hsotg->dev,
    378				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
    379				__func__, val);
    380			break;
    381		}
    382
    383		udelay(1);
    384	}
    385
    386	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
    387}
    388
    389/**
     390 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
    391 * @ep: USB endpoint to allocate request for.
    392 * @flags: Allocation flags
    393 *
    394 * Allocate a new USB request structure appropriate for the specified endpoint
    395 */
    396static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
    397						       gfp_t flags)
    398{
    399	struct dwc2_hsotg_req *req;
    400
    401	req = kzalloc(sizeof(*req), flags);
    402	if (!req)
    403		return NULL;
    404
    405	INIT_LIST_HEAD(&req->queue);
    406
    407	return &req->req;
    408}
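
/*
 * A minimal sketch of how a function driver reaches the allocator above:
 * it calls the generic usb_ep_alloc_request(), which dispatches to this
 * ep_op. The helper name and buffer handling here are hypothetical.
 */
static inline struct usb_request *example_alloc_req(struct usb_ep *ep,
						    unsigned int len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);

	if (!req)
		return NULL;

	req->length = len;
	req->buf = kmalloc(len, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}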
    409
    410/**
    411 * is_ep_periodic - return true if the endpoint is in periodic mode.
    412 * @hs_ep: The endpoint to query.
    413 *
    414 * Returns true if the endpoint is in periodic mode, meaning it is being
    415 * used for an Interrupt or ISO transfer.
    416 */
    417static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
    418{
    419	return hs_ep->periodic;
    420}
    421
    422/**
    423 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
    424 * @hsotg: The device state.
    425 * @hs_ep: The endpoint for the request
    426 * @hs_req: The request being processed.
    427 *
    428 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
    429 * of a request to ensure the buffer is ready for access by the caller.
    430 */
    431static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
    432				 struct dwc2_hsotg_ep *hs_ep,
    433				struct dwc2_hsotg_req *hs_req)
    434{
    435	struct usb_request *req = &hs_req->req;
    436
    437	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
    438}
    439
    440/*
    441 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
    442 * for Control endpoint
    443 * @hsotg: The device state.
    444 *
     445 * This function will allocate 4 descriptor chains for EP 0: 2 for the
     446 * Setup stage, and one each for IN and OUT data/status transactions.
    447 */
    448static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
    449{
    450	hsotg->setup_desc[0] =
    451		dmam_alloc_coherent(hsotg->dev,
    452				    sizeof(struct dwc2_dma_desc),
    453				    &hsotg->setup_desc_dma[0],
    454				    GFP_KERNEL);
    455	if (!hsotg->setup_desc[0])
    456		goto fail;
    457
    458	hsotg->setup_desc[1] =
    459		dmam_alloc_coherent(hsotg->dev,
    460				    sizeof(struct dwc2_dma_desc),
    461				    &hsotg->setup_desc_dma[1],
    462				    GFP_KERNEL);
    463	if (!hsotg->setup_desc[1])
    464		goto fail;
    465
    466	hsotg->ctrl_in_desc =
    467		dmam_alloc_coherent(hsotg->dev,
    468				    sizeof(struct dwc2_dma_desc),
    469				    &hsotg->ctrl_in_desc_dma,
    470				    GFP_KERNEL);
    471	if (!hsotg->ctrl_in_desc)
    472		goto fail;
    473
    474	hsotg->ctrl_out_desc =
    475		dmam_alloc_coherent(hsotg->dev,
    476				    sizeof(struct dwc2_dma_desc),
    477				    &hsotg->ctrl_out_desc_dma,
    478				    GFP_KERNEL);
    479	if (!hsotg->ctrl_out_desc)
    480		goto fail;
    481
    482	return 0;
    483
    484fail:
    485	return -ENOMEM;
    486}
    487
    488/**
     489 * dwc2_hsotg_write_fifo - write packet data to the TxFIFO
    490 * @hsotg: The controller state.
    491 * @hs_ep: The endpoint we're going to write for.
    492 * @hs_req: The request to write data for.
    493 *
    494 * This is called when the TxFIFO has some space in it to hold a new
    495 * transmission and we have something to give it. The actual setup of
    496 * the data size is done elsewhere, so all we have to do is to actually
    497 * write the data.
    498 *
     499 * The return value is zero if there is more space (or nothing was done);
     500 * otherwise -ENOSPC is returned if the FIFO space was used up.
     501 *
     502 * This routine is only needed for PIO.
    503 */
    504static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
    505				 struct dwc2_hsotg_ep *hs_ep,
    506				struct dwc2_hsotg_req *hs_req)
    507{
    508	bool periodic = is_ep_periodic(hs_ep);
    509	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
    510	int buf_pos = hs_req->req.actual;
    511	int to_write = hs_ep->size_loaded;
    512	void *data;
    513	int can_write;
    514	int pkt_round;
    515	int max_transfer;
    516
    517	to_write -= (buf_pos - hs_ep->last_load);
    518
    519	/* if there's nothing to write, get out early */
    520	if (to_write == 0)
    521		return 0;
    522
    523	if (periodic && !hsotg->dedicated_fifos) {
    524		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
    525		int size_left;
    526		int size_done;
    527
    528		/*
    529		 * work out how much data was loaded so we can calculate
    530		 * how much data is left in the fifo.
    531		 */
    532
    533		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
    534
    535		/*
    536		 * if shared fifo, we cannot write anything until the
    537		 * previous data has been completely sent.
    538		 */
    539		if (hs_ep->fifo_load != 0) {
    540			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
    541			return -ENOSPC;
    542		}
    543
    544		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
    545			__func__, size_left,
    546			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
    547
    548		/* how much of the data has moved */
    549		size_done = hs_ep->size_loaded - size_left;
    550
    551		/* how much data is left in the fifo */
    552		can_write = hs_ep->fifo_load - size_done;
    553		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
    554			__func__, can_write);
    555
    556		can_write = hs_ep->fifo_size - can_write;
    557		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
    558			__func__, can_write);
    559
    560		if (can_write <= 0) {
    561			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
    562			return -ENOSPC;
    563		}
    564	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
    565		can_write = dwc2_readl(hsotg,
    566				       DTXFSTS(hs_ep->fifo_index));
    567
    568		can_write &= 0xffff;
    569		can_write *= 4;
    570	} else {
    571		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
    572			dev_dbg(hsotg->dev,
    573				"%s: no queue slots available (0x%08x)\n",
    574				__func__, gnptxsts);
    575
    576			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
    577			return -ENOSPC;
    578		}
    579
    580		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
    581		can_write *= 4;	/* fifo size is in 32bit quantities. */
    582	}
    583
    584	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
    585
    586	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
    587		__func__, gnptxsts, can_write, to_write, max_transfer);
    588
    589	/*
     590	 * limit to 512 bytes of data; it seems that, at least on the
     591	 * non-periodic FIFO, requests of >512 bytes cause the endpoint to get
     592	 * stuck with a fragment of the end of the transfer in it.
    593	 */
    594	if (can_write > 512 && !periodic)
    595		can_write = 512;
    596
    597	/*
    598	 * limit the write to one max-packet size worth of data, but allow
    599	 * the transfer to return that it did not run out of fifo space
    600	 * doing it.
    601	 */
    602	if (to_write > max_transfer) {
    603		to_write = max_transfer;
    604
    605		/* it's needed only when we do not use dedicated fifos */
    606		if (!hsotg->dedicated_fifos)
    607			dwc2_hsotg_en_gsint(hsotg,
    608					    periodic ? GINTSTS_PTXFEMP :
    609					   GINTSTS_NPTXFEMP);
    610	}
    611
    612	/* see if we can write data */
    613
    614	if (to_write > can_write) {
    615		to_write = can_write;
    616		pkt_round = to_write % max_transfer;
    617
    618		/*
    619		 * Round the write down to an
    620		 * exact number of packets.
    621		 *
    622		 * Note, we do not currently check to see if we can ever
    623		 * write a full packet or not to the FIFO.
    624		 */
    625
    626		if (pkt_round)
    627			to_write -= pkt_round;
    628
    629		/*
    630		 * enable correct FIFO interrupt to alert us when there
    631		 * is more room left.
    632		 */
    633
    634		/* it's needed only when we do not use dedicated fifos */
    635		if (!hsotg->dedicated_fifos)
    636			dwc2_hsotg_en_gsint(hsotg,
    637					    periodic ? GINTSTS_PTXFEMP :
    638					   GINTSTS_NPTXFEMP);
    639	}
    640
    641	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
    642		to_write, hs_req->req.length, can_write, buf_pos);
    643
    644	if (to_write <= 0)
    645		return -ENOSPC;
    646
    647	hs_req->req.actual = buf_pos + to_write;
    648	hs_ep->total_data += to_write;
    649
    650	if (periodic)
    651		hs_ep->fifo_load += to_write;
    652
    653	to_write = DIV_ROUND_UP(to_write, 4);
    654	data = hs_req->req.buf + buf_pos;
    655
    656	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
    657
    658	return (to_write >= can_write) ? -ENOSPC : 0;
    659}
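
/*
 * A worked example of the shared-FIFO bookkeeping above, with hypothetical
 * numbers: if size_loaded = 1024, the size register reports size_left = 256,
 * fifo_load = 1024 and fifo_size = 1536, then
 *
 *	size_done = 1024 - 256 = 768	(data that has left the FIFO)
 *	can_write = 1024 - 768 = 256	(data still resident in the FIFO)
 *	can_write = 1536 - 256 = 1280	(bytes of FIFO space still free)
 */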
    660
    661/**
     662 * get_ep_limit - get the maximum data length for this endpoint
    663 * @hs_ep: The endpoint
    664 *
    665 * Return the maximum data that can be queued in one go on a given endpoint
    666 * so that transfers that are too long can be split.
    667 */
    668static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
    669{
    670	int index = hs_ep->index;
    671	unsigned int maxsize;
    672	unsigned int maxpkt;
    673
    674	if (index != 0) {
    675		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
    676		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
    677	} else {
    678		maxsize = 64 + 64;
    679		if (hs_ep->dir_in)
    680			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
    681		else
    682			maxpkt = 2;
    683	}
    684
    685	/* we made the constant loading easier above by using +1 */
    686	maxpkt--;
    687	maxsize--;
    688
    689	/*
    690	 * constrain by packet count if maxpkts*pktsize is greater
    691	 * than the length register size.
    692	 */
    693
    694	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
    695		maxsize = maxpkt * hs_ep->ep.maxpacket;
    696
    697	return maxsize;
    698}
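
/*
 * A worked example, using the register limits from hw.h
 * (DXEPTSIZ_XFERSIZE_LIMIT = 0x7ffff, DXEPTSIZ_PKTCNT_LIMIT = 0x3ff): for a
 * bulk endpoint with maxpacket = 512,
 *
 *	maxsize = 0x7ffff = 524287 bytes
 *	maxpkt  = 0x3ff   = 1023 packets
 *	maxpkt * maxpacket = 1023 * 512 = 523776 < 524287
 *
 * so one request is capped at 523776 bytes by the packet count field.
 */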
    699
    700/**
    701 * dwc2_hsotg_read_frameno - read current frame number
    702 * @hsotg: The device instance
    703 *
    704 * Return the current frame number
    705 */
    706static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
    707{
    708	u32 dsts;
    709
    710	dsts = dwc2_readl(hsotg, DSTS);
    711	dsts &= DSTS_SOFFN_MASK;
    712	dsts >>= DSTS_SOFFN_SHIFT;
    713
    714	return dsts;
    715}
    716
    717/**
    718 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
     719 * DMA descriptor chain prepared for a specific endpoint
    720 * @hs_ep: The endpoint
    721 *
    722 * Return the maximum data that can be queued in one go on a given endpoint
    723 * depending on its descriptor chain capacity so that transfers that
    724 * are too long can be split.
    725 */
    726static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
    727{
    728	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
    729	int is_isoc = hs_ep->isochronous;
    730	unsigned int maxsize;
    731	u32 mps = hs_ep->ep.maxpacket;
    732	int dir_in = hs_ep->dir_in;
    733
    734	if (is_isoc)
    735		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
    736					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
    737					   MAX_DMA_DESC_NUM_HS_ISOC;
    738	else
    739		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
    740
    741	/* Interrupt OUT EP with mps not multiple of 4 */
    742	if (hs_ep->index)
    743		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
    744			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
    745
    746	return maxsize;
    747}
    748
    749/*
    750 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
    751 * @hs_ep: The endpoint
    752 * @mask: RX/TX bytes mask to be defined
    753 *
    754 * Returns maximum data payload for one descriptor after analyzing endpoint
    755 * characteristics.
    756 * DMA descriptor transfer bytes limit depends on EP type:
    757 * Control out - MPS,
    758 * Isochronous - descriptor rx/tx bytes bitfield limit,
     759 * Control In/Bulk/Interrupt - multiple of mps. This avoids concatenating
     760 * data from multiple descriptors within one packet.
    761 * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
    762 * to a single descriptor.
    763 *
    764 * Selects corresponding mask for RX/TX bytes as well.
    765 */
    766static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
    767{
    768	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
    769	u32 mps = hs_ep->ep.maxpacket;
    770	int dir_in = hs_ep->dir_in;
    771	u32 desc_size = 0;
    772
    773	if (!hs_ep->index && !dir_in) {
    774		desc_size = mps;
    775		*mask = DEV_DMA_NBYTES_MASK;
    776	} else if (hs_ep->isochronous) {
    777		if (dir_in) {
    778			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
    779			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
    780		} else {
    781			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
    782			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
    783		}
    784	} else {
    785		desc_size = DEV_DMA_NBYTES_LIMIT;
    786		*mask = DEV_DMA_NBYTES_MASK;
    787
    788		/* Round down desc_size to be mps multiple */
    789		desc_size -= desc_size % mps;
    790	}
    791
    792	/* Interrupt OUT EP with mps not multiple of 4 */
    793	if (hs_ep->index)
    794		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
    795			desc_size = mps;
    796			*mask = DEV_DMA_NBYTES_MASK;
    797		}
    798
    799	return desc_size;
    800}
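
/*
 * Summary of the per-descriptor payload limits selected above
 * (mps = endpoint maxpacket):
 *
 *	Control OUT (EP0)		mps
 *	Isochronous IN/OUT		DEV_DMA_ISOC_TX/RX_NBYTES_LIMIT
 *	Control IN/Bulk/Interrupt	DEV_DMA_NBYTES_LIMIT, rounded down
 *					to a multiple of mps
 *	Interrupt OUT, mps % 4 != 0	mps (one packet per descriptor)
 */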
    801
    802static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
    803						 struct dwc2_dma_desc **desc,
    804						 dma_addr_t dma_buff,
    805						 unsigned int len,
    806						 bool true_last)
    807{
    808	int dir_in = hs_ep->dir_in;
    809	u32 mps = hs_ep->ep.maxpacket;
    810	u32 maxsize = 0;
    811	u32 offset = 0;
    812	u32 mask = 0;
    813	int i;
    814
    815	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
    816
    817	hs_ep->desc_count = (len / maxsize) +
    818				((len % maxsize) ? 1 : 0);
    819	if (len == 0)
    820		hs_ep->desc_count = 1;
    821
    822	for (i = 0; i < hs_ep->desc_count; ++i) {
    823		(*desc)->status = 0;
    824		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
    825				 << DEV_DMA_BUFF_STS_SHIFT);
    826
    827		if (len > maxsize) {
    828			if (!hs_ep->index && !dir_in)
    829				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
    830
    831			(*desc)->status |=
    832				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
    833			(*desc)->buf = dma_buff + offset;
    834
    835			len -= maxsize;
    836			offset += maxsize;
    837		} else {
    838			if (true_last)
    839				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
    840
    841			if (dir_in)
    842				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
    843					((hs_ep->send_zlp && true_last) ?
    844					DEV_DMA_SHORT : 0);
    845
    846			(*desc)->status |=
    847				len << DEV_DMA_NBYTES_SHIFT & mask;
    848			(*desc)->buf = dma_buff + offset;
    849		}
    850
    851		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
    852		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
    853				 << DEV_DMA_BUFF_STS_SHIFT);
    854		(*desc)++;
    855	}
    856}
    857
    858/*
    859 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
    860 * @hs_ep: The endpoint
     861 * @dma_buff: DMA address of the transfer buffer (possibly offset into
     862 *            the request buffer)
     863 * @len: Length of the transfer
    864 *
    865 * This function will iterate over descriptor chain and fill its entries
    866 * with corresponding information based on transfer data.
    867 */
    868static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
    869						 dma_addr_t dma_buff,
    870						 unsigned int len)
    871{
    872	struct usb_request *ureq = NULL;
    873	struct dwc2_dma_desc *desc = hs_ep->desc_list;
    874	struct scatterlist *sg;
    875	int i;
    876	u8 desc_count = 0;
    877
    878	if (hs_ep->req)
    879		ureq = &hs_ep->req->req;
    880
    881	/* non-DMA sg buffer */
    882	if (!ureq || !ureq->num_sgs) {
    883		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
    884			dma_buff, len, true);
    885		return;
    886	}
    887
    888	/* DMA sg buffer */
    889	for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
    890		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
    891			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
    892			sg_is_last(sg));
    893		desc_count += hs_ep->desc_count;
    894	}
    895
    896	hs_ep->desc_count = desc_count;
    897}
    898
    899/*
    900 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
    901 * @hs_ep: The isochronous endpoint.
    902 * @dma_buff: usb requests dma buffer.
    903 * @len: usb request transfer length.
    904 *
    905 * Fills next free descriptor with the data of the arrived usb request,
    906 * frame info, sets Last and IOC bits increments next_desc. If filled
    907 * descriptor is not the first one, removes L bit from the previous descriptor
    908 * status.
    909 */
    910static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
    911				      dma_addr_t dma_buff, unsigned int len)
    912{
    913	struct dwc2_dma_desc *desc;
    914	struct dwc2_hsotg *hsotg = hs_ep->parent;
    915	u32 index;
    916	u32 mask = 0;
    917	u8 pid = 0;
    918
    919	dwc2_gadget_get_desc_params(hs_ep, &mask);
    920
    921	index = hs_ep->next_desc;
    922	desc = &hs_ep->desc_list[index];
    923
    924	/* Check if descriptor chain full */
    925	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
    926	    DEV_DMA_BUFF_STS_HREADY) {
    927		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
    928		return 1;
    929	}
    930
     931	/* Clear L bit of previous desc if more than one entry in the chain */
    932	if (hs_ep->next_desc)
    933		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
    934
    935	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
    936		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
    937
    938	desc->status = 0;
    939	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);
    940
    941	desc->buf = dma_buff;
    942	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
    943			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
    944
    945	if (hs_ep->dir_in) {
    946		if (len)
    947			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
    948		else
    949			pid = 1;
    950		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
    951				 DEV_DMA_ISOC_PID_MASK) |
    952				((len % hs_ep->ep.maxpacket) ?
    953				 DEV_DMA_SHORT : 0) |
    954				((hs_ep->target_frame <<
    955				  DEV_DMA_ISOC_FRNUM_SHIFT) &
    956				 DEV_DMA_ISOC_FRNUM_MASK);
    957	}
    958
    959	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
    960	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
    961
    962	/* Increment frame number by interval for IN */
    963	if (hs_ep->dir_in)
    964		dwc2_gadget_incr_frame_num(hs_ep);
    965
    966	/* Update index of last configured entry in the chain */
    967	hs_ep->next_desc++;
    968	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
    969		hs_ep->next_desc = 0;
    970
    971	return 0;
    972}
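
/*
 * A worked example of the IN-direction PID field above: the PID encodes the
 * number of packets in the (micro)frame, so len = 3000 with maxpacket = 1024
 * gives pid = DIV_ROUND_UP(3000, 1024) = 3, i.e. three high-bandwidth
 * transactions in one microframe; a zero-length request still uses pid = 1.
 */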
    973
    974/*
    975 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
    976 * @hs_ep: The isochronous endpoint.
    977 *
    978 * Prepare descriptor chain for isochronous endpoints. Afterwards
    979 * write DMA address to HW and enable the endpoint.
    980 */
    981static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
    982{
    983	struct dwc2_hsotg *hsotg = hs_ep->parent;
    984	struct dwc2_hsotg_req *hs_req, *treq;
    985	int index = hs_ep->index;
    986	int ret;
    987	int i;
    988	u32 dma_reg;
    989	u32 depctl;
    990	u32 ctrl;
    991	struct dwc2_dma_desc *desc;
    992
    993	if (list_empty(&hs_ep->queue)) {
    994		hs_ep->target_frame = TARGET_FRAME_INITIAL;
    995		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
    996		return;
    997	}
    998
    999	/* Initialize descriptor chain by Host Busy status */
   1000	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
   1001		desc = &hs_ep->desc_list[i];
   1002		desc->status = 0;
   1003		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
   1004				    << DEV_DMA_BUFF_STS_SHIFT);
   1005	}
   1006
   1007	hs_ep->next_desc = 0;
   1008	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
   1009		dma_addr_t dma_addr = hs_req->req.dma;
   1010
   1011		if (hs_req->req.num_sgs) {
   1012			WARN_ON(hs_req->req.num_sgs > 1);
   1013			dma_addr = sg_dma_address(hs_req->req.sg);
   1014		}
   1015		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
   1016						 hs_req->req.length);
   1017		if (ret)
   1018			break;
   1019	}
   1020
   1021	hs_ep->compl_desc = 0;
   1022	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
   1023	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
   1024
   1025	/* write descriptor chain address to control register */
   1026	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
   1027
   1028	ctrl = dwc2_readl(hsotg, depctl);
   1029	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
   1030	dwc2_writel(hsotg, ctrl, depctl);
   1031}
   1032
   1033static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
   1034static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
   1035					struct dwc2_hsotg_ep *hs_ep,
   1036				       struct dwc2_hsotg_req *hs_req,
   1037				       int result);
   1038
   1039/**
   1040 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
   1041 * @hsotg: The controller state.
   1042 * @hs_ep: The endpoint to process a request for
   1043 * @hs_req: The request to start.
   1044 * @continuing: True if we are doing more for the current request.
   1045 *
   1046 * Start the given request running by setting the endpoint registers
   1047 * appropriately, and writing any data to the FIFOs.
   1048 */
   1049static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
   1050				 struct dwc2_hsotg_ep *hs_ep,
   1051				struct dwc2_hsotg_req *hs_req,
   1052				bool continuing)
   1053{
   1054	struct usb_request *ureq = &hs_req->req;
   1055	int index = hs_ep->index;
   1056	int dir_in = hs_ep->dir_in;
   1057	u32 epctrl_reg;
   1058	u32 epsize_reg;
   1059	u32 epsize;
   1060	u32 ctrl;
   1061	unsigned int length;
   1062	unsigned int packets;
   1063	unsigned int maxreq;
   1064	unsigned int dma_reg;
   1065
   1066	if (index != 0) {
   1067		if (hs_ep->req && !continuing) {
   1068			dev_err(hsotg->dev, "%s: active request\n", __func__);
   1069			WARN_ON(1);
   1070			return;
   1071		} else if (hs_ep->req != hs_req && continuing) {
   1072			dev_err(hsotg->dev,
   1073				"%s: continue different req\n", __func__);
   1074			WARN_ON(1);
   1075			return;
   1076		}
   1077	}
   1078
   1079	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
   1080	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
   1081	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
   1082
   1083	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
   1084		__func__, dwc2_readl(hsotg, epctrl_reg), index,
   1085		hs_ep->dir_in ? "in" : "out");
   1086
   1087	/* If endpoint is stalled, we will restart request later */
   1088	ctrl = dwc2_readl(hsotg, epctrl_reg);
   1089
   1090	if (index && ctrl & DXEPCTL_STALL) {
   1091		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
   1092		return;
   1093	}
   1094
   1095	length = ureq->length - ureq->actual;
   1096	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
   1097		ureq->length, ureq->actual);
   1098
   1099	if (!using_desc_dma(hsotg))
   1100		maxreq = get_ep_limit(hs_ep);
   1101	else
   1102		maxreq = dwc2_gadget_get_chain_limit(hs_ep);
   1103
   1104	if (length > maxreq) {
   1105		int round = maxreq % hs_ep->ep.maxpacket;
   1106
   1107		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
   1108			__func__, length, maxreq, round);
   1109
   1110		/* round down to multiple of packets */
   1111		if (round)
   1112			maxreq -= round;
   1113
   1114		length = maxreq;
   1115	}
   1116
   1117	if (length)
   1118		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
   1119	else
   1120		packets = 1;	/* send one packet if length is zero. */
   1121
   1122	if (dir_in && index != 0)
   1123		if (hs_ep->isochronous)
   1124			epsize = DXEPTSIZ_MC(packets);
   1125		else
   1126			epsize = DXEPTSIZ_MC(1);
   1127	else
   1128		epsize = 0;
   1129
   1130	/*
   1131	 * zero length packet should be programmed on its own and should not
   1132	 * be counted in DIEPTSIZ.PktCnt with other packets.
   1133	 */
   1134	if (dir_in && ureq->zero && !continuing) {
   1135		/* Test if zlp is actually required. */
   1136		if ((ureq->length >= hs_ep->ep.maxpacket) &&
   1137		    !(ureq->length % hs_ep->ep.maxpacket))
   1138			hs_ep->send_zlp = 1;
   1139	}
   1140
   1141	epsize |= DXEPTSIZ_PKTCNT(packets);
   1142	epsize |= DXEPTSIZ_XFERSIZE(length);
   1143
   1144	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
   1145		__func__, packets, length, ureq->length, epsize, epsize_reg);
   1146
   1147	/* store the request as the current one we're doing */
   1148	hs_ep->req = hs_req;
   1149
   1150	if (using_desc_dma(hsotg)) {
   1151		u32 offset = 0;
   1152		u32 mps = hs_ep->ep.maxpacket;
   1153
   1154		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
   1155		if (!dir_in) {
   1156			if (!index)
   1157				length = mps;
   1158			else if (length % mps)
   1159				length += (mps - (length % mps));
   1160		}
   1161
   1162		if (continuing)
   1163			offset = ureq->actual;
   1164
   1165		/* Fill DDMA chain entries */
   1166		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
   1167						     length);
   1168
   1169		/* write descriptor chain address to control register */
   1170		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
   1171
   1172		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
   1173			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
   1174	} else {
   1175		/* write size / packets */
   1176		dwc2_writel(hsotg, epsize, epsize_reg);
   1177
   1178		if (using_dma(hsotg) && !continuing && (length != 0)) {
   1179			/*
   1180			 * write DMA address to control register, buffer
   1181			 * already synced by dwc2_hsotg_ep_queue().
   1182			 */
   1183
   1184			dwc2_writel(hsotg, ureq->dma, dma_reg);
   1185
   1186			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
   1187				__func__, &ureq->dma, dma_reg);
   1188		}
   1189	}
   1190
   1191	if (hs_ep->isochronous) {
   1192		if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
   1193			if (hs_ep->interval == 1) {
   1194				if (hs_ep->target_frame & 0x1)
   1195					ctrl |= DXEPCTL_SETODDFR;
   1196				else
   1197					ctrl |= DXEPCTL_SETEVENFR;
   1198			}
   1199			ctrl |= DXEPCTL_CNAK;
   1200		} else {
   1201			hs_req->req.frame_number = hs_ep->target_frame;
   1202			hs_req->req.actual = 0;
   1203			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
   1204			return;
   1205		}
   1206	}
   1207
   1208	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
   1209
   1210	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
   1211
   1212	/* For Setup request do not clear NAK */
   1213	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
   1214		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */
   1215
   1216	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
   1217	dwc2_writel(hsotg, ctrl, epctrl_reg);
   1218
   1219	/*
    1220	 * Set these; it seems that DMA support increments past the end
    1221	 * of the packet buffer, so we need to calculate the length from
    1222	 * this information.
   1223	 */
   1224	hs_ep->size_loaded = length;
   1225	hs_ep->last_load = ureq->actual;
   1226
   1227	if (dir_in && !using_dma(hsotg)) {
   1228		/* set these anyway, we may need them for non-periodic in */
   1229		hs_ep->fifo_load = 0;
   1230
   1231		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
   1232	}
   1233
   1234	/*
   1235	 * Note, trying to clear the NAK here causes problems with transmit
   1236	 * on the S3C6400 ending up with the TXFIFO becoming full.
   1237	 */
   1238
   1239	/* check ep is enabled */
   1240	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
   1241		dev_dbg(hsotg->dev,
   1242			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
   1243			 index, dwc2_readl(hsotg, epctrl_reg));
   1244
   1245	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
   1246		__func__, dwc2_readl(hsotg, epctrl_reg));
   1247
   1248	/* enable ep interrupts */
   1249	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
   1250}
   1251
   1252/**
   1253 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
   1254 * @hsotg: The device state.
   1255 * @hs_ep: The endpoint the request is on.
   1256 * @req: The request being processed.
   1257 *
   1258 * We've been asked to queue a request, so ensure that the memory buffer
   1259 * is correctly setup for DMA. If we've been passed an extant DMA address
   1260 * then ensure the buffer has been synced to memory. If our buffer has no
   1261 * DMA memory, then we map the memory and mark our request to allow us to
   1262 * cleanup on completion.
   1263 */
   1264static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
   1265			      struct dwc2_hsotg_ep *hs_ep,
   1266			     struct usb_request *req)
   1267{
   1268	int ret;
   1269
   1270	hs_ep->map_dir = hs_ep->dir_in;
   1271	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
   1272	if (ret)
   1273		goto dma_error;
   1274
   1275	return 0;
   1276
   1277dma_error:
   1278	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
   1279		__func__, req->buf, req->length);
   1280
   1281	return -EIO;
   1282}
   1283
   1284static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
   1285						 struct dwc2_hsotg_ep *hs_ep,
   1286						 struct dwc2_hsotg_req *hs_req)
   1287{
   1288	void *req_buf = hs_req->req.buf;
   1289
   1290	/* If dma is not being used or buffer is aligned */
   1291	if (!using_dma(hsotg) || !((long)req_buf & 3))
   1292		return 0;
   1293
   1294	WARN_ON(hs_req->saved_req_buf);
   1295
   1296	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
   1297		hs_ep->ep.name, req_buf, hs_req->req.length);
   1298
   1299	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
   1300	if (!hs_req->req.buf) {
   1301		hs_req->req.buf = req_buf;
   1302		dev_err(hsotg->dev,
   1303			"%s: unable to allocate memory for bounce buffer\n",
   1304			__func__);
   1305		return -ENOMEM;
   1306	}
   1307
   1308	/* Save actual buffer */
   1309	hs_req->saved_req_buf = req_buf;
   1310
   1311	if (hs_ep->dir_in)
   1312		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
   1313	return 0;
   1314}
   1315
   1316static void
   1317dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
   1318					 struct dwc2_hsotg_ep *hs_ep,
   1319					 struct dwc2_hsotg_req *hs_req)
   1320{
   1321	/* If dma is not being used or buffer was aligned */
   1322	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
   1323		return;
   1324
   1325	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
   1326		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
   1327
   1328	/* Copy data from bounce buffer on successful out transfer */
   1329	if (!hs_ep->dir_in && !hs_req->req.status)
   1330		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
   1331		       hs_req->req.actual);
   1332
   1333	/* Free bounce buffer */
   1334	kfree(hs_req->req.buf);
   1335
   1336	hs_req->req.buf = hs_req->saved_req_buf;
   1337	hs_req->saved_req_buf = NULL;
   1338}
   1339
   1340/**
   1341 * dwc2_gadget_target_frame_elapsed - Checks target frame
   1342 * @hs_ep: The driver endpoint to check
   1343 *
    1344 * Returns true if the targeted frame has elapsed; in that case the
    1345 * corresponding transfer must be dropped.
   1346 */
   1347static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
   1348{
   1349	struct dwc2_hsotg *hsotg = hs_ep->parent;
   1350	u32 target_frame = hs_ep->target_frame;
   1351	u32 current_frame = hsotg->frame_number;
   1352	bool frame_overrun = hs_ep->frame_overrun;
   1353	u16 limit = DSTS_SOFFN_LIMIT;
   1354
   1355	if (hsotg->gadget.speed != USB_SPEED_HIGH)
   1356		limit >>= 3;
   1357
   1358	if (!frame_overrun && current_frame >= target_frame)
   1359		return true;
   1360
   1361	if (frame_overrun && current_frame >= target_frame &&
   1362	    ((current_frame - target_frame) < limit / 2))
   1363		return true;
   1364
   1365	return false;
   1366}
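
/*
 * A worked example with hypothetical frame numbers at high speed
 * (limit = 0x3fff): with frame_overrun set, target_frame = 0x0002 and
 * current_frame = 0x0005, current_frame >= target_frame and
 * 0x0005 - 0x0002 = 3 < limit / 2, so the function returns true and the
 * transfer is dropped.
 */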
   1367
   1368/*
   1369 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
   1370 * @hsotg: The driver state
   1371 * @hs_ep: the ep descriptor chain is for
   1372 *
    1373 * Called to update the EP0 structure's pointers depending on the stage of
    1374 * the control transfer.
   1375 */
   1376static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
   1377					  struct dwc2_hsotg_ep *hs_ep)
   1378{
   1379	switch (hsotg->ep0_state) {
   1380	case DWC2_EP0_SETUP:
   1381	case DWC2_EP0_STATUS_OUT:
   1382		hs_ep->desc_list = hsotg->setup_desc[0];
   1383		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
   1384		break;
   1385	case DWC2_EP0_DATA_IN:
   1386	case DWC2_EP0_STATUS_IN:
   1387		hs_ep->desc_list = hsotg->ctrl_in_desc;
   1388		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
   1389		break;
   1390	case DWC2_EP0_DATA_OUT:
   1391		hs_ep->desc_list = hsotg->ctrl_out_desc;
   1392		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
   1393		break;
   1394	default:
   1395		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
   1396			hsotg->ep0_state);
   1397		return -EINVAL;
   1398	}
   1399
   1400	return 0;
   1401}
   1402
   1403static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
   1404			       gfp_t gfp_flags)
   1405{
   1406	struct dwc2_hsotg_req *hs_req = our_req(req);
   1407	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   1408	struct dwc2_hsotg *hs = hs_ep->parent;
   1409	bool first;
   1410	int ret;
   1411	u32 maxsize = 0;
   1412	u32 mask = 0;
   1413
   1414
   1415	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
   1416		ep->name, req, req->length, req->buf, req->no_interrupt,
   1417		req->zero, req->short_not_ok);
   1418
   1419	/* Prevent new request submission when controller is suspended */
   1420	if (hs->lx_state != DWC2_L0) {
   1421		dev_dbg(hs->dev, "%s: submit request only in active state\n",
   1422			__func__);
   1423		return -EAGAIN;
   1424	}
   1425
   1426	/* initialise status of the request */
   1427	INIT_LIST_HEAD(&hs_req->queue);
   1428	req->actual = 0;
   1429	req->status = -EINPROGRESS;
   1430
   1431	/* Don't queue ISOC request if length greater than mps*mc */
   1432	if (hs_ep->isochronous &&
   1433	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
   1434		dev_err(hs->dev, "req length > maxpacket*mc\n");
   1435		return -EINVAL;
   1436	}
   1437
    1438	/* In DDMA mode for ISOCs, don't queue a request if its length is
    1439	 * greater than the descriptor limits.
   1440	 */
   1441	if (using_desc_dma(hs) && hs_ep->isochronous) {
   1442		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
   1443		if (hs_ep->dir_in && req->length > maxsize) {
   1444			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
   1445				req->length, maxsize);
   1446			return -EINVAL;
   1447		}
   1448
   1449		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
   1450			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
   1451				req->length, hs_ep->ep.maxpacket);
   1452			return -EINVAL;
   1453		}
   1454	}
   1455
   1456	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
   1457	if (ret)
   1458		return ret;
   1459
   1460	/* if we're using DMA, sync the buffers as necessary */
   1461	if (using_dma(hs)) {
   1462		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
   1463		if (ret)
   1464			return ret;
   1465	}
   1466	/* If using descriptor DMA configure EP0 descriptor chain pointers */
   1467	if (using_desc_dma(hs) && !hs_ep->index) {
   1468		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
   1469		if (ret)
   1470			return ret;
   1471	}
   1472
   1473	first = list_empty(&hs_ep->queue);
   1474	list_add_tail(&hs_req->queue, &hs_ep->queue);
   1475
   1476	/*
    1477	 * Handle DDMA isochronous transfers separately - just add a new entry
    1478	 * to the descriptor chain.
    1479	 * The transfer will be started once SW gets either a NAK or an
    1480	 * OutTknEpDis interrupt.
   1481	 */
   1482	if (using_desc_dma(hs) && hs_ep->isochronous) {
   1483		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
   1484			dma_addr_t dma_addr = hs_req->req.dma;
   1485
   1486			if (hs_req->req.num_sgs) {
   1487				WARN_ON(hs_req->req.num_sgs > 1);
   1488				dma_addr = sg_dma_address(hs_req->req.sg);
   1489			}
   1490			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
   1491						   hs_req->req.length);
   1492		}
   1493		return 0;
   1494	}
   1495
   1496	/* Change EP direction if status phase request is after data out */
   1497	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
   1498	    hs->ep0_state == DWC2_EP0_DATA_OUT)
   1499		hs_ep->dir_in = 1;
   1500
   1501	if (first) {
   1502		if (!hs_ep->isochronous) {
   1503			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
   1504			return 0;
   1505		}
   1506
   1507		/* Update current frame number value. */
   1508		hs->frame_number = dwc2_hsotg_read_frameno(hs);
   1509		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
   1510			dwc2_gadget_incr_frame_num(hs_ep);
   1511			/* Update current frame number value once more as it
   1512			 * changes here.
   1513			 */
   1514			hs->frame_number = dwc2_hsotg_read_frameno(hs);
   1515		}
   1516
   1517		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
   1518			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
   1519	}
   1520	return 0;
   1521}
   1522
   1523static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
   1524				    gfp_t gfp_flags)
   1525{
   1526	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   1527	struct dwc2_hsotg *hs = hs_ep->parent;
   1528	unsigned long flags;
   1529	int ret;
   1530
   1531	spin_lock_irqsave(&hs->lock, flags);
   1532	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
   1533	spin_unlock_irqrestore(&hs->lock, flags);
   1534
   1535	return ret;
   1536}
   1537
   1538static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
   1539				       struct usb_request *req)
   1540{
   1541	struct dwc2_hsotg_req *hs_req = our_req(req);
   1542
   1543	kfree(hs_req);
   1544}
   1545
   1546/**
   1547 * dwc2_hsotg_complete_oursetup - setup completion callback
   1548 * @ep: The endpoint the request was on.
   1549 * @req: The request completed.
   1550 *
   1551 * Called on completion of any requests the driver itself
   1552 * submitted that need cleaning up.
   1553 */
   1554static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
   1555					 struct usb_request *req)
   1556{
   1557	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   1558	struct dwc2_hsotg *hsotg = hs_ep->parent;
   1559
   1560	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
   1561
   1562	dwc2_hsotg_ep_free_request(ep, req);
   1563}
   1564
   1565/**
   1566 * ep_from_windex - convert control wIndex value to endpoint
   1567 * @hsotg: The driver state.
   1568 * @windex: The control request wIndex field (in host order).
   1569 *
    1570 * Convert the given wIndex into a pointer to a driver endpoint
   1571 * structure, or return NULL if it is not a valid endpoint.
   1572 */
   1573static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
   1574					    u32 windex)
   1575{
   1576	int dir = (windex & USB_DIR_IN) ? 1 : 0;
   1577	int idx = windex & 0x7F;
   1578
   1579	if (windex >= 0x100)
   1580		return NULL;
   1581
   1582	if (idx > hsotg->num_of_eps)
   1583		return NULL;
   1584
   1585	return index_to_ep(hsotg, idx, dir);
   1586}
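
/*
 * A worked example of the wIndex decoding above: for wIndex = 0x0081,
 * dir = (0x0081 & USB_DIR_IN) ? 1 : 0 = 1 (IN) and idx = 0x0081 & 0x7f = 1,
 * so the lookup resolves to hsotg->eps_in[1]; any wIndex >= 0x100 is
 * rejected outright.
 */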
   1587
   1588/**
    1589 * dwc2_hsotg_set_test_mode - Enable USB test modes
    1590 * @hsotg: The driver state.
    1591 * @testmode: requested USB test mode
    1592 * Enable the USB test mode requested by the host.
   1593 */
   1594int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
   1595{
   1596	int dctl = dwc2_readl(hsotg, DCTL);
   1597
   1598	dctl &= ~DCTL_TSTCTL_MASK;
   1599	switch (testmode) {
   1600	case USB_TEST_J:
   1601	case USB_TEST_K:
   1602	case USB_TEST_SE0_NAK:
   1603	case USB_TEST_PACKET:
   1604	case USB_TEST_FORCE_ENABLE:
   1605		dctl |= testmode << DCTL_TSTCTL_SHIFT;
   1606		break;
   1607	default:
   1608		return -EINVAL;
   1609	}
   1610	dwc2_writel(hsotg, dctl, DCTL);
   1611	return 0;
   1612}
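
/*
 * The selector is written straight into DCTL.TstCtl. For example, a
 * SET_FEATURE(TEST_MODE) request carrying USB_TEST_PACKET (selector 4)
 * sets the field to 4 and the core transmits the standard test packet
 * until the device is reset or power-cycled.
 */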
   1613
   1614/**
   1615 * dwc2_hsotg_send_reply - send reply to control request
   1616 * @hsotg: The device state
   1617 * @ep: Endpoint 0
   1618 * @buff: Buffer for request
   1619 * @length: Length of reply.
   1620 *
   1621 * Create a request and queue it on the given endpoint. This is useful as
   1622 * an internal method of sending replies to certain control requests, etc.
   1623 */
   1624static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
   1625				 struct dwc2_hsotg_ep *ep,
   1626				void *buff,
   1627				int length)
   1628{
   1629	struct usb_request *req;
   1630	int ret;
   1631
   1632	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
   1633
   1634	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
   1635	hsotg->ep0_reply = req;
   1636	if (!req) {
   1637		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
   1638		return -ENOMEM;
   1639	}
   1640
   1641	req->buf = hsotg->ep0_buff;
   1642	req->length = length;
   1643	/*
    1644	 * The zero flag is for sending a ZLP in the DATA IN stage. It has no
    1645	 * impact on the STATUS stage.
   1646	 */
   1647	req->zero = 0;
   1648	req->complete = dwc2_hsotg_complete_oursetup;
   1649
   1650	if (length)
   1651		memcpy(req->buf, buff, length);
   1652
   1653	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
   1654	if (ret) {
   1655		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
   1656		return ret;
   1657	}
   1658
   1659	return 0;
   1660}
   1661
   1662/**
   1663 * dwc2_hsotg_process_req_status - process request GET_STATUS
   1664 * @hsotg: The device state
   1665 * @ctrl: USB control request
   1666 */
   1667static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
   1668					 struct usb_ctrlrequest *ctrl)
   1669{
   1670	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
   1671	struct dwc2_hsotg_ep *ep;
   1672	__le16 reply;
   1673	u16 status;
   1674	int ret;
   1675
   1676	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
   1677
   1678	if (!ep0->dir_in) {
   1679		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
   1680		return -EINVAL;
   1681	}
   1682
   1683	switch (ctrl->bRequestType & USB_RECIP_MASK) {
   1684	case USB_RECIP_DEVICE:
   1685		status = hsotg->gadget.is_selfpowered <<
   1686			 USB_DEVICE_SELF_POWERED;
   1687		status |= hsotg->remote_wakeup_allowed <<
   1688			  USB_DEVICE_REMOTE_WAKEUP;
   1689		reply = cpu_to_le16(status);
   1690		break;
   1691
   1692	case USB_RECIP_INTERFACE:
   1693		/* currently, the data result should be zero */
   1694		reply = cpu_to_le16(0);
   1695		break;
   1696
   1697	case USB_RECIP_ENDPOINT:
   1698		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
   1699		if (!ep)
   1700			return -ENOENT;
   1701
   1702		reply = cpu_to_le16(ep->halted ? 1 : 0);
   1703		break;
   1704
   1705	default:
   1706		return 0;
   1707	}
   1708
   1709	if (le16_to_cpu(ctrl->wLength) != 2)
   1710		return -EINVAL;
   1711
   1712	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
   1713	if (ret) {
   1714		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
   1715		return ret;
   1716	}
   1717
   1718	return 1;
   1719}
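
/*
 * Worked example, not part of the original driver: for a self-powered
 * gadget with remote wakeup enabled, the GET_STATUS device reply built
 * above is (1 << USB_DEVICE_SELF_POWERED) | (1 << USB_DEVICE_REMOTE_WAKEUP).
 * The helper name is hypothetical.
 */
static inline __le16 get_status_reply_example(void)
{
	u16 status = (1 << USB_DEVICE_SELF_POWERED) |
		     (1 << USB_DEVICE_REMOTE_WAKEUP);

	/* 0x0001 | 0x0002 = 0x0003, sent as a 2-byte little-endian reply */
	return cpu_to_le16(status);
}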
   1720
   1721static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
   1722
   1723/**
   1724 * get_ep_head - return the first request on the endpoint
   1725 * @hs_ep: The controller endpoint to get
   1726 *
   1727 * Get the first request on the endpoint.
   1728 */
   1729static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
   1730{
   1731	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
   1732					queue);
   1733}
   1734
   1735/**
   1736 * dwc2_gadget_start_next_request - Starts next request from ep queue
   1737 * @hs_ep: Endpoint structure
   1738 *
   1739 * If the queue is empty and the EP is ISOC-OUT, unmask OUTTKNEPDIS,
   1740 * which is masked in its handler, so that resynchronization with the
   1741 * host can take place.
   1742 */
   1743static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
   1744{
   1745	struct dwc2_hsotg *hsotg = hs_ep->parent;
   1746	int dir_in = hs_ep->dir_in;
   1747	struct dwc2_hsotg_req *hs_req;
   1748
   1749	if (!list_empty(&hs_ep->queue)) {
   1750		hs_req = get_ep_head(hs_ep);
   1751		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
   1752		return;
   1753	}
   1754	if (!hs_ep->isochronous)
   1755		return;
   1756
   1757	if (dir_in) {
   1758		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
   1759			__func__);
   1760	} else {
   1761		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
   1762			__func__);
   1763	}
   1764}
   1765
   1766/**
   1767 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
   1768 * @hsotg: The device state
   1769 * @ctrl: USB control request
   1770 */
   1771static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
   1772					  struct usb_ctrlrequest *ctrl)
   1773{
   1774	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
   1775	struct dwc2_hsotg_req *hs_req;
   1776	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
   1777	struct dwc2_hsotg_ep *ep;
   1778	int ret;
   1779	bool halted;
   1780	u32 recip;
   1781	u32 wValue;
   1782	u32 wIndex;
   1783
   1784	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
   1785		__func__, set ? "SET" : "CLEAR");
   1786
   1787	wValue = le16_to_cpu(ctrl->wValue);
   1788	wIndex = le16_to_cpu(ctrl->wIndex);
   1789	recip = ctrl->bRequestType & USB_RECIP_MASK;
   1790
   1791	switch (recip) {
   1792	case USB_RECIP_DEVICE:
   1793		switch (wValue) {
   1794		case USB_DEVICE_REMOTE_WAKEUP:
   1795			if (set)
   1796				hsotg->remote_wakeup_allowed = 1;
   1797			else
   1798				hsotg->remote_wakeup_allowed = 0;
   1799			break;
   1800
   1801		case USB_DEVICE_TEST_MODE:
   1802			if ((wIndex & 0xff) != 0)
   1803				return -EINVAL;
   1804			if (!set)
   1805				return -EINVAL;
   1806
   1807			hsotg->test_mode = wIndex >> 8;
   1808			break;
   1809		default:
   1810			return -ENOENT;
   1811		}
   1812
   1813		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
   1814		if (ret) {
   1815			dev_err(hsotg->dev,
   1816				"%s: failed to send reply\n", __func__);
   1817			return ret;
   1818		}
   1819		break;
   1820
   1821	case USB_RECIP_ENDPOINT:
   1822		ep = ep_from_windex(hsotg, wIndex);
   1823		if (!ep) {
   1824			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
   1825				__func__, wIndex);
   1826			return -ENOENT;
   1827		}
   1828
   1829		switch (wValue) {
   1830		case USB_ENDPOINT_HALT:
   1831			halted = ep->halted;
   1832
   1833			if (!ep->wedged)
   1834				dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
   1835
   1836			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
   1837			if (ret) {
   1838				dev_err(hsotg->dev,
   1839					"%s: failed to send reply\n", __func__);
   1840				return ret;
   1841			}
   1842
   1843			/*
   1844			 * we have to complete all requests for ep if it was
   1845			 * halted, and the halt was cleared by CLEAR_FEATURE
   1846			 */
   1847
   1848			if (!set && halted) {
   1849				/*
   1850				 * If we have request in progress,
   1851				 * then complete it
   1852				 */
   1853				if (ep->req) {
   1854					hs_req = ep->req;
   1855					ep->req = NULL;
   1856					list_del_init(&hs_req->queue);
   1857					if (hs_req->req.complete) {
   1858						spin_unlock(&hsotg->lock);
   1859						usb_gadget_giveback_request(
   1860							&ep->ep, &hs_req->req);
   1861						spin_lock(&hsotg->lock);
   1862					}
   1863				}
   1864
   1865				/* If we have pending request, then start it */
   1866				if (!ep->req)
   1867					dwc2_gadget_start_next_request(ep);
   1868			}
   1869
   1870			break;
   1871
   1872		default:
   1873			return -ENOENT;
   1874		}
   1875		break;
   1876	default:
   1877		return -ENOENT;
   1878	}
   1879	return 1;
   1880}
   1881
   1882static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
   1883
   1884/**
   1885 * dwc2_hsotg_stall_ep0 - stall ep0
   1886 * @hsotg: The device state
   1887 *
   1888 * Set stall for ep0 as response for setup request.
   1889 */
   1890static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
   1891{
   1892	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
   1893	u32 reg;
   1894	u32 ctrl;
   1895
   1896	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
   1897	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
   1898
   1899	/*
   1900	 * DxEPCTL_Stall will be cleared by the core once the stall has
   1901	 * taken effect, so there is no need to clear it later.
   1902	 */
   1903
   1904	ctrl = dwc2_readl(hsotg, reg);
   1905	ctrl |= DXEPCTL_STALL;
   1906	ctrl |= DXEPCTL_CNAK;
   1907	dwc2_writel(hsotg, ctrl, reg);
   1908
   1909	dev_dbg(hsotg->dev,
   1910		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
   1911		ctrl, reg, dwc2_readl(hsotg, reg));
   1912
   1913	/*
   1914	 * complete won't be called, so we enqueue
   1915	 * setup request here
   1916	 */
   1917	dwc2_hsotg_enqueue_setup(hsotg);
   1918}
   1919
   1920/**
   1921 * dwc2_hsotg_process_control - process a control request
   1922 * @hsotg: The device state
   1923 * @ctrl: The control request received
   1924 *
   1925 * The controller has received the SETUP phase of a control request, and
   1926 * needs to work out what to do next (and whether to pass it on to the
   1927 * gadget driver).
   1928 */
   1929static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
   1930				       struct usb_ctrlrequest *ctrl)
   1931{
   1932	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
   1933	int ret = 0;
   1934	u32 dcfg;
   1935
   1936	dev_dbg(hsotg->dev,
   1937		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
   1938		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
   1939		ctrl->wIndex, ctrl->wLength);
   1940
   1941	if (ctrl->wLength == 0) {
   1942		ep0->dir_in = 1;
   1943		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
   1944	} else if (ctrl->bRequestType & USB_DIR_IN) {
   1945		ep0->dir_in = 1;
   1946		hsotg->ep0_state = DWC2_EP0_DATA_IN;
   1947	} else {
   1948		ep0->dir_in = 0;
   1949		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
   1950	}
   1951
   1952	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
   1953		switch (ctrl->bRequest) {
   1954		case USB_REQ_SET_ADDRESS:
   1955			hsotg->connected = 1;
   1956			dcfg = dwc2_readl(hsotg, DCFG);
   1957			dcfg &= ~DCFG_DEVADDR_MASK;
   1958			dcfg |= (le16_to_cpu(ctrl->wValue) <<
   1959				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
   1960			dwc2_writel(hsotg, dcfg, DCFG);
   1961
   1962			dev_info(hsotg->dev, "new address %d\n", le16_to_cpu(ctrl->wValue));
   1963
   1964			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
   1965			return;
   1966
   1967		case USB_REQ_GET_STATUS:
   1968			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
   1969			break;
   1970
   1971		case USB_REQ_CLEAR_FEATURE:
   1972		case USB_REQ_SET_FEATURE:
   1973			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
   1974			break;
   1975		}
   1976	}
   1977
   1978	/* as a fallback, try delivering it to the driver to deal with */
   1979
   1980	if (ret == 0 && hsotg->driver) {
   1981		spin_unlock(&hsotg->lock);
   1982		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
   1983		spin_lock(&hsotg->lock);
   1984		if (ret < 0)
   1985			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
   1986	}
   1987
   1988	hsotg->delayed_status = false;
   1989	if (ret == USB_GADGET_DELAYED_STATUS)
   1990		hsotg->delayed_status = true;
   1991
   1992	/*
   1993	 * the request either cannot be handled or is not formatted correctly,
   1994	 * so respond with a STALL for the status stage to indicate failure.
   1995	 */
   1996
   1997	if (ret < 0)
   1998		dwc2_hsotg_stall_ep0(hsotg);
   1999}
   2000
   2001/**
   2002 * dwc2_hsotg_complete_setup - completion of a setup transfer
   2003 * @ep: The endpoint the request was on.
   2004 * @req: The request completed.
   2005 *
   2006 * Called on completion of any requests the driver itself submitted for
   2007 * EP0 setup packets
   2008 */
   2009static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
   2010				      struct usb_request *req)
   2011{
   2012	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   2013	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2014
   2015	if (req->status < 0) {
   2016		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
   2017		return;
   2018	}
   2019
   2020	spin_lock(&hsotg->lock);
   2021	if (req->actual == 0)
   2022		dwc2_hsotg_enqueue_setup(hsotg);
   2023	else
   2024		dwc2_hsotg_process_control(hsotg, req->buf);
   2025	spin_unlock(&hsotg->lock);
   2026}
   2027
   2028/**
   2029 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
   2030 * @hsotg: The device state.
   2031 *
   2032 * Enqueue a request on EP0 if necessary to receive any SETUP packets
   2033 * from the host.
   2034 */
   2035static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
   2036{
   2037	struct usb_request *req = hsotg->ctrl_req;
   2038	struct dwc2_hsotg_req *hs_req = our_req(req);
   2039	int ret;
   2040
   2041	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
   2042
   2043	req->zero = 0;
   2044	req->length = 8;
   2045	req->buf = hsotg->ctrl_buff;
   2046	req->complete = dwc2_hsotg_complete_setup;
   2047
   2048	if (!list_empty(&hs_req->queue)) {
   2049		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
   2050		return;
   2051	}
   2052
   2053	hsotg->eps_out[0]->dir_in = 0;
   2054	hsotg->eps_out[0]->send_zlp = 0;
   2055	hsotg->ep0_state = DWC2_EP0_SETUP;
   2056
   2057	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
   2058	if (ret < 0) {
   2059		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
   2060		/*
   2061		 * Don't think there's much we can do other than watch the
   2062		 * driver fail.
   2063		 */
   2064	}
   2065}
   2066
   2067static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
   2068				   struct dwc2_hsotg_ep *hs_ep)
   2069{
   2070	u32 ctrl;
   2071	u8 index = hs_ep->index;
   2072	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
   2073	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
   2074
   2075	if (hs_ep->dir_in)
   2076		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
   2077			index);
   2078	else
   2079		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
   2080			index);
   2081	if (using_desc_dma(hsotg)) {
   2082		/* No specific buffer needed for ep0 ZLP */
   2083		dma_addr_t dma = hs_ep->desc_list_dma;
   2084
   2085		if (!index)
   2086			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
   2087
   2088		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
   2089	} else {
   2090		dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
   2091			    DXEPTSIZ_XFERSIZE(0),
   2092			    epsiz_reg);
   2093	}
   2094
   2095	ctrl = dwc2_readl(hsotg, epctl_reg);
   2096	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
   2097	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
   2098	ctrl |= DXEPCTL_USBACTEP;
   2099	dwc2_writel(hsotg, ctrl, epctl_reg);
   2100}
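
/*
 * Worked example, not part of the original driver, assuming the
 * DXEPTSIZ field layout from hw.h (MC at bit 29, PKTCNT at bit 19,
 * XFERSIZE at bit 0). A ZLP is one packet of zero bytes, so the
 * non-DDMA path above writes 0x20080000. The helper name is
 * hypothetical.
 */
static inline u32 zlp_epsiz_example(void)
{
	/* 0x20000000 | 0x00080000 | 0 = 0x20080000 */
	return DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | DXEPTSIZ_XFERSIZE(0);
}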
   2101
   2102/**
   2103 * dwc2_hsotg_complete_request - complete a request given to us
   2104 * @hsotg: The device state.
   2105 * @hs_ep: The endpoint the request was on.
   2106 * @hs_req: The request to complete.
   2107 * @result: The result code (0 => Ok, otherwise errno)
   2108 *
   2109 * The given request has finished, so call the necessary completion
   2110 * if it has one and then look to see if we can start a new request
   2111 * on the endpoint.
   2112 *
   2113 * Note, expects the ep to already be locked as appropriate.
   2114 */
   2115static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
   2116					struct dwc2_hsotg_ep *hs_ep,
   2117				       struct dwc2_hsotg_req *hs_req,
   2118				       int result)
   2119{
   2120	if (!hs_req) {
   2121		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
   2122		return;
   2123	}
   2124
   2125	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
   2126		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
   2127
   2128	/*
   2129	 * only replace the status if we've not already set an error
   2130	 * from a previous transaction
   2131	 */
   2132
   2133	if (hs_req->req.status == -EINPROGRESS)
   2134		hs_req->req.status = result;
   2135
   2136	if (using_dma(hsotg))
   2137		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
   2138
   2139	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
   2140
   2141	hs_ep->req = NULL;
   2142	list_del_init(&hs_req->queue);
   2143
   2144	/*
   2145	 * call the complete request with the locks off, just in case the
   2146	 * request tries to queue more work for this endpoint.
   2147	 */
   2148
   2149	if (hs_req->req.complete) {
   2150		spin_unlock(&hsotg->lock);
   2151		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
   2152		spin_lock(&hsotg->lock);
   2153	}
   2154
   2155	/* In DDMA there is no need to proceed to starting the next ISOC request */
   2156	if (using_desc_dma(hsotg) && hs_ep->isochronous)
   2157		return;
   2158
   2159	/*
   2160	 * Look to see if there is anything else to do. Note, the completion
   2161	 * of the previous request may have caused a new request to be started
   2162	 * so be careful when doing this.
   2163	 */
   2164
   2165	if (!hs_ep->req && result >= 0)
   2166		dwc2_gadget_start_next_request(hs_ep);
   2167}
   2168
   2169/*
   2170 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
   2171 * @hs_ep: The endpoint the request was on.
   2172 *
   2173 * Get the first request from the ep queue and find the descriptor the
   2174 * completion happened on: SW derives the descriptor currently in use by HW
   2175 * from the DEPDMA register value, adjusts dma_address and computes the
   2176 * completed index; update the request's actual length, give back to gadget.
   2177 */
   2178static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
   2179{
   2180	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2181	struct dwc2_hsotg_req *hs_req;
   2182	struct usb_request *ureq;
   2183	u32 desc_sts;
   2184	u32 mask;
   2185
   2186	desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
   2187
   2188	/* Process only descriptors with buffer status set to DMA done */
   2189	while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
   2190		DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
   2191
   2192		hs_req = get_ep_head(hs_ep);
   2193		if (!hs_req) {
   2194			dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
   2195			return;
   2196		}
   2197		ureq = &hs_req->req;
   2198
   2199		/* Check completion status */
   2200		if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
   2201			DEV_DMA_STS_SUCC) {
   2202			mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
   2203				DEV_DMA_ISOC_RX_NBYTES_MASK;
   2204			ureq->actual = ureq->length - ((desc_sts & mask) >>
   2205				DEV_DMA_ISOC_NBYTES_SHIFT);
   2206
   2207			/* Adjust the actual length for ISOC OUT if the
   2208			 * length is not a multiple of 4
   2209			 */
   2210			if (!hs_ep->dir_in && ureq->length & 0x3)
   2211				ureq->actual += 4 - (ureq->length & 0x3);
   2212
   2213			/* Set actual frame number for completed transfers */
   2214			ureq->frame_number =
   2215				(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
   2216				DEV_DMA_ISOC_FRNUM_SHIFT;
   2217		}
   2218
   2219		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
   2220
   2221		hs_ep->compl_desc++;
   2222		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
   2223			hs_ep->compl_desc = 0;
   2224		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
   2225	}
   2226}
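
/*
 * Illustrative sketch, not part of the original driver: how the actual
 * length is derived from an ISOC descriptor status word above. The
 * helper name and the 512-byte example are hypothetical.
 */
static inline void isoc_ddma_actual_example(struct usb_request *ureq,
					    u32 desc_sts)
{
	/* IN direction: remaining bytes live in the TX NBYTES field */
	u32 remaining = (desc_sts & DEV_DMA_ISOC_TX_NBYTES_MASK) >>
			DEV_DMA_ISOC_NBYTES_SHIFT;

	/* e.g. 512 bytes queued with 0 remaining -> actual = 512 */
	ureq->actual = ureq->length - remaining;
}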
   2227
   2228/*
   2229 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
   2230 * @hs_ep: The isochronous endpoint.
   2231 *
   2232 * If the EP is ISOC-OUT, the RX FIFO needs to be flushed to remove the
   2233 * source of the BNA interrupt. Reset target_frame and next_desc so that
   2234 * ISOCs can be restarted on the NAK interrupt for the IN direction or on
   2235 * the OUTTKNEPDIS interrupt for the OUT direction.
   2236 */
   2237static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
   2238{
   2239	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2240
   2241	if (!hs_ep->dir_in)
   2242		dwc2_flush_rx_fifo(hsotg);
   2243	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
   2244
   2245	hs_ep->target_frame = TARGET_FRAME_INITIAL;
   2246	hs_ep->next_desc = 0;
   2247	hs_ep->compl_desc = 0;
   2248}
   2249
   2250/**
   2251 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
   2252 * @hsotg: The device state.
   2253 * @ep_idx: The endpoint index for the data
   2254 * @size: The size of data in the fifo, in bytes
   2255 *
   2256 * The FIFO status shows there is data to read from the FIFO for a given
   2257 * endpoint, so sort out whether we need to read the data into a request
   2258 * that has been made for that endpoint.
   2259 */
   2260static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
   2261{
   2262	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
   2263	struct dwc2_hsotg_req *hs_req = hs_ep->req;
   2264	int to_read;
   2265	int max_req;
   2266	int read_ptr;
   2267
   2268	if (!hs_req) {
   2269		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
   2270		int ptr;
   2271
   2272		dev_dbg(hsotg->dev,
   2273			"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
   2274			 __func__, size, ep_idx, epctl);
   2275
   2276		/* dump the data from the FIFO, we've nothing we can do */
   2277		for (ptr = 0; ptr < size; ptr += 4)
   2278			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
   2279
   2280		return;
   2281	}
   2282
   2283	to_read = size;
   2284	read_ptr = hs_req->req.actual;
   2285	max_req = hs_req->req.length - read_ptr;
   2286
   2287	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
   2288		__func__, to_read, max_req, read_ptr, hs_req->req.length);
   2289
   2290	if (to_read > max_req) {
   2291		/*
   2292		 * more data appeared than we were willing
   2293		 * to deal with in this request.
   2294		 */
   2295
   2296		/* currently we don't deal with this */
   2297		WARN_ON_ONCE(1);
   2298	}
   2299
   2300	hs_ep->total_data += to_read;
   2301	hs_req->req.actual += to_read;
   2302	to_read = DIV_ROUND_UP(to_read, 4);
   2303
   2304	/*
   2305	 * note, we might over-write the buffer end by 3 bytes depending on
   2306	 * alignment of the data.
   2307	 */
   2308	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
   2309		       hs_req->req.buf + read_ptr, to_read);
   2310}
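
/*
 * Worked example, not part of the original driver: the FIFO is read in
 * 32-bit words, so a 7-byte packet takes DIV_ROUND_UP(7, 4) = 2 reads
 * and may scribble up to 3 bytes past the payload, as noted above. The
 * helper name is hypothetical.
 */
static inline unsigned int rx_fifo_words_example(unsigned int size)
{
	return DIV_ROUND_UP(size, 4);
}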
   2311
   2312/**
   2313 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
   2314 * @hsotg: The device instance
   2315 * @dir_in: If IN zlp
   2316 *
   2317 * Generate a zero-length IN packet request for terminating a SETUP
   2318 * transaction.
   2319 *
   2320 * Note, since we don't write any data to the TxFIFO, it is
   2321 * currently believed that we do not need to wait for any space in
   2322 * the TxFIFO.
   2323 */
   2324static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
   2325{
   2326	/* eps_out[0] is used in both directions */
   2327	hsotg->eps_out[0]->dir_in = dir_in;
   2328	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
   2329
   2330	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
   2331}
   2332
   2333/*
   2334 * dwc2_gadget_get_xfersize_ddma - get the bytes remaining from descriptors
   2335 * @hs_ep: The endpoint the transfer went on
   2336 *
   2337 * Iterate over the endpoint's descriptor chain and sum the bytes remaining
   2338 * in the DMA descriptors after the transfer completed. Used for non-isoc EPs.
   2339 */
   2340static int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
   2341{
   2342	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
   2343	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2344	int bytes_rem = 0;
   2345	unsigned int bytes_rem_correction = 0;
   2346	struct dwc2_dma_desc *desc = hs_ep->desc_list;
   2347	int i;
   2348	u32 status;
   2349	u32 mps = hs_ep->ep.maxpacket;
   2350	int dir_in = hs_ep->dir_in;
   2351
   2352	if (!desc)
   2353		return -EINVAL;
   2354
   2355	/* Interrupt OUT EP with mps not multiple of 4 */
   2356	if (hs_ep->index)
   2357		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
   2358			bytes_rem_correction = 4 - (mps % 4);
   2359
   2360	for (i = 0; i < hs_ep->desc_count; ++i) {
   2361		status = desc->status;
   2362		bytes_rem += status & DEV_DMA_NBYTES_MASK;
   2363		bytes_rem -= bytes_rem_correction;
   2364
   2365		if (status & DEV_DMA_STS_MASK)
   2366			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
   2367				i, status & DEV_DMA_STS_MASK);
   2368
   2369		if (status & DEV_DMA_L)
   2370			break;
   2371
   2372		desc++;
   2373	}
   2374
   2375	return bytes_rem;
   2376}
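
/*
 * Worked example, not part of the original driver: for an interrupt OUT
 * endpoint with mps = 10, the core rounds each packet buffer up to a
 * word boundary, so each descriptor over-counts by 4 - (10 % 4) = 2
 * bytes, which the loop above subtracts. The helper name is
 * hypothetical.
 */
static inline unsigned int ddma_rem_correction_example(u32 mps)
{
	return (mps % 4) ? 4 - (mps % 4) : 0;
}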
   2377
   2378/**
   2379 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
   2380 * @hsotg: The device instance
   2381 * @epnum: The endpoint received from
   2382 *
   2383 * The RXFIFO has delivered an OutDone event, which means that the data
   2384 * transfer for an OUT endpoint has been completed, either by a short
   2385 * packet or by the transfer finishing.
   2386 */
   2387static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
   2388{
   2389	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
   2390	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
   2391	struct dwc2_hsotg_req *hs_req = hs_ep->req;
   2392	struct usb_request *req = &hs_req->req;
   2393	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
   2394	int result = 0;
   2395
   2396	if (!hs_req) {
   2397		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
   2398		return;
   2399	}
   2400
   2401	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
   2402		dev_dbg(hsotg->dev, "zlp packet received\n");
   2403		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
   2404		dwc2_hsotg_enqueue_setup(hsotg);
   2405		return;
   2406	}
   2407
   2408	if (using_desc_dma(hsotg))
   2409		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
   2410
   2411	if (using_dma(hsotg)) {
   2412		unsigned int size_done;
   2413
   2414		/*
   2415		 * Calculate the size of the transfer by checking how much
   2416		 * is left in the endpoint size register and then working it
   2417		 * out from the amount we loaded for the transfer.
   2418		 *
   2419		 * We need to do this as DMA pointers are always 32bit aligned
   2420		 * so may overshoot/undershoot the transfer.
   2421		 */
   2422
   2423		size_done = hs_ep->size_loaded - size_left;
   2424		size_done += hs_ep->last_load;
   2425
   2426		req->actual = size_done;
   2427	}
   2428
   2429	/* if there is more of the request to do, schedule a new transfer */
   2430	if (req->actual < req->length && size_left == 0) {
   2431		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
   2432		return;
   2433	}
   2434
   2435	if (req->actual < req->length && req->short_not_ok) {
   2436		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
   2437			__func__, req->actual, req->length);
   2438
   2439		/*
   2440		 * todo - what should we return here? there's no one else
   2441		 * even bothering to check the status.
   2442		 */
   2443	}
   2444
   2445	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
   2446	if (!using_desc_dma(hsotg) && epnum == 0 &&
   2447	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
   2448		/* Move to STATUS IN */
   2449		if (!hsotg->delayed_status)
   2450			dwc2_hsotg_ep0_zlp(hsotg, true);
   2451	}
   2452
   2453	/* Set actual frame number for completed transfers */
   2454	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
   2455		req->frame_number = hs_ep->target_frame;
   2456		dwc2_gadget_incr_frame_num(hs_ep);
   2457	}
   2458
   2459	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
   2460}
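
/*
 * Worked example, not part of the original driver: if 512 bytes were
 * loaded (size_loaded = 512, last_load = 0) and DOEPTSIZ reports 112
 * bytes left, the DMA path above sets req->actual = 512 - 112 + 0 =
 * 400. The helper name is hypothetical.
 */
static inline unsigned int outdone_size_done_example(unsigned int size_loaded,
						     unsigned int last_load,
						     unsigned int size_left)
{
	return size_loaded - size_left + last_load;
}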
   2461
   2462/**
   2463 * dwc2_hsotg_handle_rx - RX FIFO has data
   2464 * @hsotg: The device instance
   2465 *
   2466 * The IRQ handler has detected that the RX FIFO has some data in it
   2467 * that requires processing, so find out what is in there and do the
   2468 * appropriate read.
   2469 *
   2470 * The RXFIFO is a true FIFO, the packets coming out are still in packet
   2471 * chunks, so if you have x packets received on an endpoint you'll get x
   2472 * FIFO events delivered, each with a packet's worth of data in it.
   2473 *
   2474 * When using DMA, we should not be processing events from the RXFIFO
   2475 * as the actual data should be sent to the memory directly and we turn
   2476 * on the completion interrupts to get notifications of transfer completion.
   2477 */
   2478static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
   2479{
   2480	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
   2481	u32 epnum, status, size;
   2482
   2483	WARN_ON(using_dma(hsotg));
   2484
   2485	epnum = grxstsr & GRXSTS_EPNUM_MASK;
   2486	status = grxstsr & GRXSTS_PKTSTS_MASK;
   2487
   2488	size = grxstsr & GRXSTS_BYTECNT_MASK;
   2489	size >>= GRXSTS_BYTECNT_SHIFT;
   2490
   2491	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
   2492		__func__, grxstsr, size, epnum);
   2493
   2494	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
   2495	case GRXSTS_PKTSTS_GLOBALOUTNAK:
   2496		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
   2497		break;
   2498
   2499	case GRXSTS_PKTSTS_OUTDONE:
   2500		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
   2501			dwc2_hsotg_read_frameno(hsotg));
   2502
   2503		if (!using_dma(hsotg))
   2504			dwc2_hsotg_handle_outdone(hsotg, epnum);
   2505		break;
   2506
   2507	case GRXSTS_PKTSTS_SETUPDONE:
   2508		dev_dbg(hsotg->dev,
   2509			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
   2510			dwc2_hsotg_read_frameno(hsotg),
   2511			dwc2_readl(hsotg, DOEPCTL(0)));
   2512		/*
   2513		 * Call dwc2_hsotg_handle_outdone here if it was not called from
   2514		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
   2515		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
   2516		 */
   2517		if (hsotg->ep0_state == DWC2_EP0_SETUP)
   2518			dwc2_hsotg_handle_outdone(hsotg, epnum);
   2519		break;
   2520
   2521	case GRXSTS_PKTSTS_OUTRX:
   2522		dwc2_hsotg_rx_data(hsotg, epnum, size);
   2523		break;
   2524
   2525	case GRXSTS_PKTSTS_SETUPRX:
   2526		dev_dbg(hsotg->dev,
   2527			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
   2528			dwc2_hsotg_read_frameno(hsotg),
   2529			dwc2_readl(hsotg, DOEPCTL(0)));
   2530
   2531		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
   2532
   2533		dwc2_hsotg_rx_data(hsotg, epnum, size);
   2534		break;
   2535
   2536	default:
   2537		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
   2538			 __func__, grxstsr);
   2539
   2540		dwc2_hsotg_dump(hsotg);
   2541		break;
   2542	}
   2543}
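
/*
 * Worked example, not part of the original driver, assuming the GRXSTS
 * field layout from hw.h (EPNUM at bit 0, BYTECNT at bit 4, PKTSTS at
 * bit 17): a popped GRXSTSP value of 0x000c0080 decodes to epnum 0,
 * size 8 and PKTSTS 6 (GRXSTS_PKTSTS_SETUPRX), i.e. an 8-byte SETUP
 * packet on EP0. The helper name is hypothetical.
 */
static inline void grxstsp_decode_example(u32 grxstsr)
{
	u32 epnum = grxstsr & GRXSTS_EPNUM_MASK;
	u32 size = (grxstsr & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	u32 pktsts = (grxstsr & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	(void)epnum;
	(void)size;
	(void)pktsts;
}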
   2544
   2545/**
   2546 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
   2547 * @mps: The maximum packet size in bytes.
   2548 */
   2549static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
   2550{
   2551	switch (mps) {
   2552	case 64:
   2553		return D0EPCTL_MPS_64;
   2554	case 32:
   2555		return D0EPCTL_MPS_32;
   2556	case 16:
   2557		return D0EPCTL_MPS_16;
   2558	case 8:
   2559		return D0EPCTL_MPS_8;
   2560	}
   2561
   2562	/* bad max packet size, warn and return invalid result */
   2563	WARN_ON(1);
   2564	return (u32)-1;
   2565}
   2566
   2567/**
   2568 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
   2569 * @hsotg: The driver state.
   2570 * @ep: The index number of the endpoint
   2571 * @mps: The maximum packet size in bytes
   2572 * @mc: The multicount value
   2573 * @dir_in: True if direction is in.
   2574 *
   2575 * Configure the maximum packet size for the given endpoint, updating
   2576 * the hardware control registers to reflect this.
   2577 */
   2578static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
   2579					unsigned int ep, unsigned int mps,
   2580					unsigned int mc, unsigned int dir_in)
   2581{
   2582	struct dwc2_hsotg_ep *hs_ep;
   2583	u32 reg;
   2584
   2585	hs_ep = index_to_ep(hsotg, ep, dir_in);
   2586	if (!hs_ep)
   2587		return;
   2588
   2589	if (ep == 0) {
   2590		u32 mps_bytes = mps;
   2591
   2592		/* EP0 is a special case */
   2593		mps = dwc2_hsotg_ep0_mps(mps_bytes);
   2594		if (mps > 3)
   2595			goto bad_mps;
   2596		hs_ep->ep.maxpacket = mps_bytes;
   2597		hs_ep->mc = 1;
   2598	} else {
   2599		if (mps > 1024)
   2600			goto bad_mps;
   2601		hs_ep->mc = mc;
   2602		if (mc > 3)
   2603			goto bad_mps;
   2604		hs_ep->ep.maxpacket = mps;
   2605	}
   2606
   2607	if (dir_in) {
   2608		reg = dwc2_readl(hsotg, DIEPCTL(ep));
   2609		reg &= ~DXEPCTL_MPS_MASK;
   2610		reg |= mps;
   2611		dwc2_writel(hsotg, reg, DIEPCTL(ep));
   2612	} else {
   2613		reg = dwc2_readl(hsotg, DOEPCTL(ep));
   2614		reg &= ~DXEPCTL_MPS_MASK;
   2615		reg |= mps;
   2616		dwc2_writel(hsotg, reg, DOEPCTL(ep));
   2617	}
   2618
   2619	return;
   2620
   2621bad_mps:
   2622	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
   2623}
   2624
   2625/**
   2626 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
   2627 * @hsotg: The driver state
   2628 * @idx: The index for the endpoint (0..15)
   2629 */
   2630static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
   2631{
   2632	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
   2633		    GRSTCTL);
   2634
   2635	/* wait until the fifo is flushed */
   2636	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
   2637		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
   2638			 __func__);
   2639}
   2640
   2641/**
   2642 * dwc2_hsotg_trytx - check to see if anything needs transmitting
   2643 * @hsotg: The driver state
   2644 * @hs_ep: The driver endpoint to check.
   2645 *
   2646 * Check to see if there is a request that has data to send, and if so
   2647 * make an attempt to write data into the FIFO.
   2648 */
   2649static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
   2650			    struct dwc2_hsotg_ep *hs_ep)
   2651{
   2652	struct dwc2_hsotg_req *hs_req = hs_ep->req;
   2653
   2654	if (!hs_ep->dir_in || !hs_req) {
   2655		/*
   2656		 * if a request is not enqueued, we disable interrupts
   2657		 * for endpoints, except for ep0
   2658		 */
   2659		if (hs_ep->index != 0)
   2660			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
   2661					      hs_ep->dir_in, 0);
   2662		return 0;
   2663	}
   2664
   2665	if (hs_req->req.actual < hs_req->req.length) {
   2666		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
   2667			hs_ep->index);
   2668		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
   2669	}
   2670
   2671	return 0;
   2672}
   2673
   2674/**
   2675 * dwc2_hsotg_complete_in - complete IN transfer
   2676 * @hsotg: The device state.
   2677 * @hs_ep: The endpoint that has just completed.
   2678 *
   2679 * An IN transfer has been completed, update the transfer's state and then
   2680 * call the relevant completion routines.
   2681 */
   2682static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
   2683				   struct dwc2_hsotg_ep *hs_ep)
   2684{
   2685	struct dwc2_hsotg_req *hs_req = hs_ep->req;
   2686	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
   2687	int size_left, size_done;
   2688
   2689	if (!hs_req) {
   2690		dev_dbg(hsotg->dev, "XferCompl but no req\n");
   2691		return;
   2692	}
   2693
   2694	/* Finish ZLP handling for IN EP0 transactions */
   2695	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
   2696		dev_dbg(hsotg->dev, "zlp packet sent\n");
   2697
   2698		/*
   2699		 * While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction
   2700		 * was changed to IN. Change it back to complete the OUT transfer.
   2701		 */
   2702		hs_ep->dir_in = 0;
   2703
   2704		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
   2705		if (hsotg->test_mode) {
   2706			int ret;
   2707
   2708			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
   2709			if (ret < 0) {
   2710				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
   2711					hsotg->test_mode);
   2712				dwc2_hsotg_stall_ep0(hsotg);
   2713				return;
   2714			}
   2715		}
   2716		dwc2_hsotg_enqueue_setup(hsotg);
   2717		return;
   2718	}
   2719
   2720	/*
   2721	 * Calculate the size of the transfer by checking how much is left
   2722	 * in the endpoint size register and then working it out from
   2723	 * the amount we loaded for the transfer.
   2724	 *
   2725	 * We do this even for DMA, as the transfer may have incremented
   2726	 * past the end of the buffer (DMA transfers are always 32bit
   2727	 * aligned).
   2728	 */
   2729	if (using_desc_dma(hsotg)) {
   2730		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
   2731		if (size_left < 0)
   2732			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
   2733				size_left);
   2734	} else {
   2735		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
   2736	}
   2737
   2738	size_done = hs_ep->size_loaded - size_left;
   2739	size_done += hs_ep->last_load;
   2740
   2741	if (hs_req->req.actual != size_done)
   2742		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
   2743			__func__, hs_req->req.actual, size_done);
   2744
   2745	hs_req->req.actual = size_done;
   2746	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
   2747		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
   2748
   2749	if (!size_left && hs_req->req.actual < hs_req->req.length) {
   2750		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
   2751		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
   2752		return;
   2753	}
   2754
   2755	/* ZLP for all endpoints in non-DDMA; for ep0 only in the DATA IN stage */
   2756	if (hs_ep->send_zlp) {
   2757		hs_ep->send_zlp = 0;
   2758		if (!using_desc_dma(hsotg)) {
   2759			dwc2_hsotg_program_zlp(hsotg, hs_ep);
   2760			/* transfer will be completed on next complete interrupt */
   2761			return;
   2762		}
   2763	}
   2764
   2765	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
   2766		/* Move to STATUS OUT */
   2767		dwc2_hsotg_ep0_zlp(hsotg, false);
   2768		return;
   2769	}
   2770
   2771	/* Set actual frame number for completed transfers */
   2772	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
   2773		hs_req->req.frame_number = hs_ep->target_frame;
   2774		dwc2_gadget_incr_frame_num(hs_ep);
   2775	}
   2776
   2777	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
   2778}
   2779
   2780/**
   2781 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
   2782 * @hsotg: The device state.
   2783 * @idx: Index of ep.
   2784 * @dir_in: Endpoint direction 1-in 0-out.
   2785 *
   2786 * Reads the interrupts for the endpoint with the given index and
   2787 * direction, masking epint_reg with the corresponding mask.
   2788 */
   2789static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
   2790					  unsigned int idx, int dir_in)
   2791{
   2792	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
   2793	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
   2794	u32 ints;
   2795	u32 mask;
   2796	u32 diepempmsk;
   2797
   2798	mask = dwc2_readl(hsotg, epmsk_reg);
   2799	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
   2800	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
   2801	mask |= DXEPINT_SETUP_RCVD;
   2802
   2803	ints = dwc2_readl(hsotg, epint_reg);
   2804	ints &= mask;
   2805	return ints;
   2806}
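
/*
 * Worked example, not part of the original driver: with idx = 2 and bit
 * 2 of DIEPEMPMSK set, the effective mask above gains
 * DIEPMSK_TXFIFOEMPTY, so a TxFIFO-empty interrupt on IN endpoint 2
 * survives the masking. The helper name is hypothetical.
 */
static inline u32 txfempty_mask_example(u32 diepempmsk, unsigned int idx)
{
	return ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
}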
   2807
   2808/**
   2809 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
   2810 * @hs_ep: The endpoint on which interrupt is asserted.
   2811 *
   2812 * This interrupt indicates that the endpoint has been disabled per the
   2813 * application's request.
   2814 *
   2815 * For IN endpoints this flushes the txfifo and, for BULK, sets
   2816 * DCTL_CGNPINNAK to clear the global non-periodic IN NAK; for ISOC
   2817 * it completes the current request.
   2818 *
   2819 * For ISOC-OUT endpoints it completes expired requests and starts any that remain.
   2820 */
   2821static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
   2822{
   2823	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2824	struct dwc2_hsotg_req *hs_req;
   2825	unsigned char idx = hs_ep->index;
   2826	int dir_in = hs_ep->dir_in;
   2827	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
   2828	int dctl = dwc2_readl(hsotg, DCTL);
   2829
   2830	dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
   2831
   2832	if (dir_in) {
   2833		int epctl = dwc2_readl(hsotg, epctl_reg);
   2834
   2835		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
   2836
   2837		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
   2838			int dctl = dwc2_readl(hsotg, DCTL);
   2839
   2840			dctl |= DCTL_CGNPINNAK;
   2841			dwc2_writel(hsotg, dctl, DCTL);
   2842		}
   2843	} else {
   2844
   2845		if (dctl & DCTL_GOUTNAKSTS) {
   2846			dctl |= DCTL_CGOUTNAK;
   2847			dwc2_writel(hsotg, dctl, DCTL);
   2848		}
   2849	}
   2850
   2851	if (!hs_ep->isochronous)
   2852		return;
   2853
   2854	if (list_empty(&hs_ep->queue)) {
   2855		dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
   2856			__func__, hs_ep);
   2857		return;
   2858	}
   2859
   2860	do {
   2861		hs_req = get_ep_head(hs_ep);
   2862		if (hs_req) {
   2863			hs_req->req.frame_number = hs_ep->target_frame;
   2864			hs_req->req.actual = 0;
   2865			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
   2866						    -ENODATA);
   2867		}
   2868		dwc2_gadget_incr_frame_num(hs_ep);
   2869		/* Update current frame number value. */
   2870		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
   2871	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
   2872}
   2873
   2874/**
   2875 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
   2876 * @ep: The endpoint on which interrupt is asserted.
   2877 *
   2878 * This is the starting point for an ISOC-OUT transfer: synchronization is
   2879 * done with the first OUT token received from the host while the
   2880 * corresponding EP is disabled.
   2881 *
   2882 * The device does not know the initial frame in which the OUT token will
   2883 * arrive, so HW raises OUTTKNEPDIS; on it, SW calculates the next frame.
   2884 */
   2885static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
   2886{
   2887	struct dwc2_hsotg *hsotg = ep->parent;
   2888	struct dwc2_hsotg_req *hs_req;
   2889	int dir_in = ep->dir_in;
   2890
   2891	if (dir_in || !ep->isochronous)
   2892		return;
   2893
   2894	if (using_desc_dma(hsotg)) {
   2895		if (ep->target_frame == TARGET_FRAME_INITIAL) {
   2896			/* Start first ISO Out */
   2897			ep->target_frame = hsotg->frame_number;
   2898			dwc2_gadget_start_isoc_ddma(ep);
   2899		}
   2900		return;
   2901	}
   2902
   2903	if (ep->target_frame == TARGET_FRAME_INITIAL) {
   2904		u32 ctrl;
   2905
   2906		ep->target_frame = hsotg->frame_number;
   2907		if (ep->interval > 1) {
   2908			ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
   2909			if (ep->target_frame & 0x1)
   2910				ctrl |= DXEPCTL_SETODDFR;
   2911			else
   2912				ctrl |= DXEPCTL_SETEVENFR;
   2913
   2914			dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
   2915		}
   2916	}
   2917
   2918	while (dwc2_gadget_target_frame_elapsed(ep)) {
   2919		hs_req = get_ep_head(ep);
   2920		if (hs_req) {
   2921			hs_req->req.frame_number = ep->target_frame;
   2922			hs_req->req.actual = 0;
   2923			dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
   2924		}
   2925
   2926		dwc2_gadget_incr_frame_num(ep);
   2927		/* Update current frame number value. */
   2928		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
   2929	}
   2930
   2931	if (!ep->req)
   2932		dwc2_gadget_start_next_request(ep);
   2933
   2934}
   2935
   2936static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
   2937				   struct dwc2_hsotg_ep *hs_ep);
   2938
   2939/**
   2940 * dwc2_gadget_handle_nak - handle NAK interrupt
   2941 * @hs_ep: The endpoint on which interrupt is asserted.
   2942 *
   2943 * This is the starting point for an ISOC-IN transfer: synchronization is
   2944 * done with the first IN token received from the host while the
   2945 * corresponding EP is disabled.
   2946 *
   2947 * The device does not know when the first token will arrive. On its
   2948 * arrival, HW generates two interrupts: 'IN token received while FIFO
   2949 * empty' and 'NAK'. A NAK on ISOC-IN means the token arrived and a ZLP
   2950 * was sent in response as there was no data in the FIFO. SW uses this
   2951 * interrupt to learn the token's frame; the interval then gives the next.
   2952 */
   2953static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
   2954{
   2955	struct dwc2_hsotg *hsotg = hs_ep->parent;
   2956	struct dwc2_hsotg_req *hs_req;
   2957	int dir_in = hs_ep->dir_in;
   2958	u32 ctrl;
   2959
   2960	if (!dir_in || !hs_ep->isochronous)
   2961		return;
   2962
   2963	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
   2964
   2965		if (using_desc_dma(hsotg)) {
   2966			hs_ep->target_frame = hsotg->frame_number;
   2967			dwc2_gadget_incr_frame_num(hs_ep);
   2968
   2969			/* In service interval mode target_frame must
   2970			 * be set to last (u)frame of the service interval.
   2971			 */
   2972			if (hsotg->params.service_interval) {
   2973				/* Set target_frame to the first (u)frame of
   2974				 * the service interval
   2975				 */
   2976				hs_ep->target_frame &= ~hs_ep->interval + 1;
   2977
   2978				/* Set target_frame to the last (u)frame of
   2979				 * the service interval
   2980				 */
   2981				dwc2_gadget_incr_frame_num(hs_ep);
   2982				dwc2_gadget_dec_frame_num_by_one(hs_ep);
   2983			}
   2984
   2985			dwc2_gadget_start_isoc_ddma(hs_ep);
   2986			return;
   2987		}
   2988
   2989		hs_ep->target_frame = hsotg->frame_number;
   2990		if (hs_ep->interval > 1) {
   2991			u32 ctrl = dwc2_readl(hsotg,
   2992					      DIEPCTL(hs_ep->index));
   2993			if (hs_ep->target_frame & 0x1)
   2994				ctrl |= DXEPCTL_SETODDFR;
   2995			else
   2996				ctrl |= DXEPCTL_SETEVENFR;
   2997
   2998			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
   2999		}
   3000	}
   3001
   3002	if (using_desc_dma(hsotg))
   3003		return;
   3004
   3005	ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
   3006	if (ctrl & DXEPCTL_EPENA)
   3007		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
   3008	else
   3009		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
   3010
   3011	while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
   3012		hs_req = get_ep_head(hs_ep);
   3013		if (hs_req) {
   3014			hs_req->req.frame_number = hs_ep->target_frame;
   3015			hs_req->req.actual = 0;
   3016			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
   3017		}
   3018
   3019		dwc2_gadget_incr_frame_num(hs_ep);
   3020		/* Update current frame number value. */
   3021		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
   3022	}
   3023
   3024	if (!hs_ep->req)
   3025		dwc2_gadget_start_next_request(hs_ep);
   3026}
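
/*
 * Worked example, not part of the original driver, for the
 * service-interval adjustment above: ~interval + 1 is two's-complement
 * negation, so with interval = 8 (u)frames, frame 0x123 is aligned down
 * to 0x120 (first frame of the interval); adding the interval and
 * subtracting one gives 0x127, the last frame. The helper name is
 * hypothetical.
 */
static inline u32 service_interval_last_frame_example(u32 frame, u32 interval)
{
	u32 first = frame & (~interval + 1);

	return first + interval - 1;
}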
   3027
   3028/**
   3029 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
   3030 * @hsotg: The driver state
   3031 * @idx: The index for the endpoint (0..15)
   3032 * @dir_in: Set if this is an IN endpoint
   3033 *
   3034 * Process and clear any interrupt pending for an individual endpoint
   3035 */
   3036static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
   3037			     int dir_in)
   3038{
   3039	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
   3040	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
   3041	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
   3042	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
   3043	u32 ints;
   3044
   3045	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
   3046
   3047	/* Clear endpoint interrupts */
   3048	dwc2_writel(hsotg, ints, epint_reg);
   3049
   3050	if (!hs_ep) {
   3051		dev_err(hsotg->dev, "%s: Interrupt for unconfigured ep%d(%s)\n",
   3052			__func__, idx, dir_in ? "in" : "out");
   3053		return;
   3054	}
   3055
   3056	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
   3057		__func__, idx, dir_in ? "in" : "out", ints);
   3058
   3059	/* Don't process XferCompl interrupt if it is a setup packet */
   3060	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
   3061		ints &= ~DXEPINT_XFERCOMPL;
   3062
   3063	/*
   3064	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
   3065	 * stage and xfercomplete was generated without SETUP phase done
   3066	 * interrupt. SW should parse received setup packet only after host's
   3067	 * exit from setup phase of control transfer.
   3068	 */
   3069	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
   3070	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
   3071		ints &= ~DXEPINT_XFERCOMPL;
   3072
   3073	if (ints & DXEPINT_XFERCOMPL) {
   3074		dev_dbg(hsotg->dev,
   3075			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
   3076			__func__, dwc2_readl(hsotg, epctl_reg),
   3077			dwc2_readl(hsotg, epsiz_reg));
   3078
   3079		/* In DDMA handle isochronous requests separately */
   3080		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
   3081			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
   3082		} else if (dir_in) {
   3083			/*
   3084			 * We get OutDone from the FIFO, so we only
   3085			 * need to look at completing IN requests here
   3086			 * if operating in slave mode
   3087			 */
   3088			if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
   3089				dwc2_hsotg_complete_in(hsotg, hs_ep);
   3090
   3091			if (idx == 0 && !hs_ep->req)
   3092				dwc2_hsotg_enqueue_setup(hsotg);
   3093		} else if (using_dma(hsotg)) {
   3094			/*
   3095			 * We're using DMA, we need to fire an OutDone here
   3096			 * as we ignore the RXFIFO.
   3097			 */
   3098			if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
   3099				dwc2_hsotg_handle_outdone(hsotg, idx);
   3100		}
   3101	}
   3102
   3103	if (ints & DXEPINT_EPDISBLD)
   3104		dwc2_gadget_handle_ep_disabled(hs_ep);
   3105
   3106	if (ints & DXEPINT_OUTTKNEPDIS)
   3107		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
   3108
   3109	if (ints & DXEPINT_NAKINTRPT)
   3110		dwc2_gadget_handle_nak(hs_ep);
   3111
   3112	if (ints & DXEPINT_AHBERR)
   3113		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
   3114
   3115	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
   3116		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
   3117
   3118		if (using_dma(hsotg) && idx == 0) {
   3119			/*
   3120			 * this is the notification that we've received a
   3121			 * setup packet. In non-DMA mode we'd get this
   3122			 * from the RXFIFO; instead we need to process
   3123			 * the setup here.
   3124			 */
   3125
   3126			if (dir_in)
   3127				WARN_ON_ONCE(1);
   3128			else
   3129				dwc2_hsotg_handle_outdone(hsotg, 0);
   3130		}
   3131	}
   3132
   3133	if (ints & DXEPINT_STSPHSERCVD) {
   3134		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
   3135
   3136		/* Safety check EP0 state when STSPHSERCVD asserted */
   3137		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
   3138			/* Move to STATUS IN for DDMA */
   3139			if (using_desc_dma(hsotg)) {
   3140				if (!hsotg->delayed_status)
   3141					dwc2_hsotg_ep0_zlp(hsotg, true);
   3142				else
   3143				/* In the case of a 3-stage Control Write with
   3144				 * delayed status, when the Status IN transfer
   3145				 * started before STSPHSERCVD was asserted, the
   3146				 * NAKSTS bit was not cleared by CNAK in
   3147				 * dwc2_hsotg_start_req(). Clear NAKSTS now to
   3148				 * allow the transfer to complete.
   3149				 */
   3150					dwc2_set_bit(hsotg, DIEPCTL(0),
   3151						     DXEPCTL_CNAK);
   3152			}
   3153		}
   3154
   3155	}
   3156
   3157	if (ints & DXEPINT_BACK2BACKSETUP)
   3158		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
   3159
   3160	if (ints & DXEPINT_BNAINTR) {
   3161		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
   3162		if (hs_ep->isochronous)
   3163			dwc2_gadget_handle_isoc_bna(hs_ep);
   3164	}
   3165
   3166	if (dir_in && !hs_ep->isochronous) {
   3167		/* not sure if this is important, but we'll clear it anyway */
   3168		if (ints & DXEPINT_INTKNTXFEMP) {
   3169			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
   3170				__func__, idx);
   3171		}
   3172
   3173		/* this probably means something bad is happening */
   3174		if (ints & DXEPINT_INTKNEPMIS) {
   3175			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
   3176				 __func__, idx);
   3177		}
   3178
   3179		/* FIFO has space or is empty (see GAHBCFG) */
   3180		if (hsotg->dedicated_fifos &&
   3181		    ints & DXEPINT_TXFEMP) {
   3182			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
   3183				__func__, idx);
   3184			if (!using_dma(hsotg))
   3185				dwc2_hsotg_trytx(hsotg, hs_ep);
   3186		}
   3187	}
   3188}
   3189
   3190/**
   3191 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
   3192 * @hsotg: The device state.
   3193 *
   3194 * Handle updating the device settings after the enumeration phase has
   3195 * been completed.
   3196 */
   3197static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
   3198{
   3199	u32 dsts = dwc2_readl(hsotg, DSTS);
   3200	int ep0_mps = 0, ep_mps = 8;
   3201
   3202	/*
   3203	 * This should signal the finish of the enumeration phase
   3204	 * of the USB handshaking, so we should now know what rate
   3205	 * we connected at.
   3206	 */
   3207
   3208	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
   3209
   3210	/*
   3211	 * note, since we're limited by the size of transfer on EP0, and
   3212	 * it seems IN transfers must be an even number of packets, we do
   3213	 * not advertise a 64-byte MPS on EP0.
   3214	 */
   3215
   3216	/* catch both EnumSpd_FS and EnumSpd_FS48 */
   3217	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
   3218	case DSTS_ENUMSPD_FS:
   3219	case DSTS_ENUMSPD_FS48:
   3220		hsotg->gadget.speed = USB_SPEED_FULL;
   3221		ep0_mps = EP0_MPS_LIMIT;
   3222		ep_mps = 1023;
   3223		break;
   3224
   3225	case DSTS_ENUMSPD_HS:
   3226		hsotg->gadget.speed = USB_SPEED_HIGH;
   3227		ep0_mps = EP0_MPS_LIMIT;
   3228		ep_mps = 1024;
   3229		break;
   3230
   3231	case DSTS_ENUMSPD_LS:
   3232		hsotg->gadget.speed = USB_SPEED_LOW;
   3233		ep0_mps = 8;
   3234		ep_mps = 8;
   3235		/*
   3236		 * note, we don't actually support LS in this driver at the
   3237		 * moment, and the documentation seems to imply that it isn't
   3238		 * supported by the PHYs on some of the devices.
   3239		 */
   3240		break;
   3241	}
   3242	dev_info(hsotg->dev, "new device is %s\n",
   3243		 usb_speed_string(hsotg->gadget.speed));
   3244
   3245	/*
   3246	 * we should now know the maximum packet size for an
   3247	 * endpoint, so set the endpoints to a default value.
   3248	 */
   3249
   3250	if (ep0_mps) {
   3251		int i;
   3252		/* Initialize ep0 for both in and out directions */
   3253		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
   3254		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
   3255		for (i = 1; i < hsotg->num_of_eps; i++) {
   3256			if (hsotg->eps_in[i])
   3257				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
   3258							    0, 1);
   3259			if (hsotg->eps_out[i])
   3260				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
   3261							    0, 0);
   3262		}
   3263	}
   3264
   3265	/* ensure after enumeration our EP0 is active */
   3266
   3267	dwc2_hsotg_enqueue_setup(hsotg);
   3268
   3269	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
   3270		dwc2_readl(hsotg, DIEPCTL0),
   3271		dwc2_readl(hsotg, DOEPCTL0));
   3272}
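
/*
 * Annotation sketch, not part of the original driver: after a
 * high-speed EnumDone the defaults above are ep0_mps = EP0_MPS_LIMIT
 * (64 bytes) and ep_mps = 1024, which are then written into the DxEPCTL
 * MPS fields. The helper name is hypothetical.
 */
static inline void enumdone_mps_example(struct dwc2_hsotg *hsotg)
{
	dwc2_hsotg_set_ep_maxpacket(hsotg, 0, EP0_MPS_LIMIT, 0, 1);
	dwc2_hsotg_set_ep_maxpacket(hsotg, 1, 1024, 0, 1);
}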
   3273
   3274/**
   3275 * kill_all_requests - remove all requests from the endpoint's queue
   3276 * @hsotg: The device state.
   3277 * @ep: The endpoint the requests may be on.
   3278 * @result: The result code to use.
   3279 *
   3280 * Go through the requests on the given endpoint and mark them
   3281 * completed with the given result code.
   3282 */
   3283static void kill_all_requests(struct dwc2_hsotg *hsotg,
   3284			      struct dwc2_hsotg_ep *ep,
   3285			      int result)
   3286{
   3287	unsigned int size;
   3288
   3289	ep->req = NULL;
   3290
   3291	while (!list_empty(&ep->queue)) {
   3292		struct dwc2_hsotg_req *req = get_ep_head(ep);
   3293
   3294		dwc2_hsotg_complete_request(hsotg, ep, req, result);
   3295	}
   3296
   3297	if (!hsotg->dedicated_fifos)
   3298		return;
   3299	size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
   3300	if (size < ep->fifo_size)
   3301		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
   3302}
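
/*
 * Worked example, not part of the original driver: DTXFSTS reports the
 * free TxFIFO space in 32-bit words, so the byte count above is
 * (dtxfsts & 0xffff) * 4; a full 256-word FIFO reads back as 1024
 * bytes. The helper name is hypothetical.
 */
static inline unsigned int txfifo_free_bytes_example(u32 dtxfsts)
{
	return (dtxfsts & 0xffff) * 4;
}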
   3303
   3304/**
   3305 * dwc2_hsotg_disconnect - disconnect service
   3306 * @hsotg: The device state.
   3307 *
   3308 * The device has been disconnected. Remove all current
   3309 * transactions and signal the gadget driver that this
   3310 * has happened.
   3311 */
   3312void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
   3313{
   3314	unsigned int ep;
   3315
   3316	if (!hsotg->connected)
   3317		return;
   3318
   3319	hsotg->connected = 0;
   3320	hsotg->test_mode = 0;
   3321
   3322	/* all endpoints should be shut down */
   3323	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
   3324		if (hsotg->eps_in[ep])
   3325			kill_all_requests(hsotg, hsotg->eps_in[ep],
   3326					  -ESHUTDOWN);
   3327		if (hsotg->eps_out[ep])
   3328			kill_all_requests(hsotg, hsotg->eps_out[ep],
   3329					  -ESHUTDOWN);
   3330	}
   3331
   3332	call_gadget(hsotg, disconnect);
   3333	hsotg->lx_state = DWC2_L3;
   3334
   3335	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
   3336}
   3337
   3338/**
   3339 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
   3340 * @hsotg: The device state.
   3341 * @periodic: True if this is a periodic FIFO interrupt
   3342 */
   3343static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
   3344{
   3345	struct dwc2_hsotg_ep *ep;
   3346	int epno, ret;
   3347
   3348	/* look through for any more data to transmit */
   3349	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
   3350		ep = index_to_ep(hsotg, epno, 1);
   3351
   3352		if (!ep)
   3353			continue;
   3354
   3355		if (!ep->dir_in)
   3356			continue;
   3357
   3358		if ((periodic && !ep->periodic) ||
   3359		    (!periodic && ep->periodic))
   3360			continue;
   3361
   3362		ret = dwc2_hsotg_trytx(hsotg, ep);
   3363		if (ret < 0)
   3364			break;
   3365	}
   3366}
   3367
   3368/* IRQ flags which will trigger a retry around the IRQ loop */
   3369#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
   3370			GINTSTS_PTXFEMP |  \
   3371			GINTSTS_RXFLVL)
   3372
   3373static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
   3374/**
   3375 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
   3376 * @hsotg: The device state
    3377 * @is_usb_reset: USB reset flag
   3378 *
   3379 * Issue a soft reset to the core, and await the core finishing it.
   3380 */
   3381void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
   3382				       bool is_usb_reset)
   3383{
   3384	u32 intmsk;
   3385	u32 val;
   3386	u32 usbcfg;
   3387	u32 dcfg = 0;
   3388	int ep;
   3389
   3390	/* Kill any ep0 requests as controller will be reinitialized */
   3391	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
   3392
   3393	if (!is_usb_reset) {
   3394		if (dwc2_core_reset(hsotg, true))
   3395			return;
   3396	} else {
   3397		/* all endpoints should be shutdown */
   3398		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
   3399			if (hsotg->eps_in[ep])
   3400				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
   3401			if (hsotg->eps_out[ep])
   3402				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
   3403		}
   3404	}
   3405
   3406	/*
    3407	 * we must now enable ep0, ready for host detection, and then
    3408	 * set the configuration.
   3409	 */
   3410
   3411	/* keep other bits untouched (so e.g. forced modes are not lost) */
   3412	usbcfg = dwc2_readl(hsotg, GUSBCFG);
   3413	usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
   3414	usbcfg |= GUSBCFG_TOUTCAL(7);
   3415
   3416	/* remove the HNP/SRP and set the PHY */
   3417	usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
    3418	dwc2_writel(hsotg, usbcfg, GUSBCFG);
   3419
   3420	dwc2_phy_init(hsotg, true);
   3421
   3422	dwc2_hsotg_init_fifo(hsotg);
   3423
   3424	if (!is_usb_reset)
   3425		dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
   3426
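        	/*
        	 * Program the IN endpoint mismatch count (EPMisCnt); a
        	 * shared-FIFO mode setting per the DWC2 databook.
        	 */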
   3427	dcfg |= DCFG_EPMISCNT(1);
   3428
   3429	switch (hsotg->params.speed) {
   3430	case DWC2_SPEED_PARAM_LOW:
   3431		dcfg |= DCFG_DEVSPD_LS;
   3432		break;
   3433	case DWC2_SPEED_PARAM_FULL:
   3434		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
   3435			dcfg |= DCFG_DEVSPD_FS48;
   3436		else
   3437			dcfg |= DCFG_DEVSPD_FS;
   3438		break;
   3439	default:
   3440		dcfg |= DCFG_DEVSPD_HS;
   3441	}
   3442
   3443	if (hsotg->params.ipg_isoc_en)
   3444		dcfg |= DCFG_IPG_ISOC_SUPPORDED;
   3445
   3446	dwc2_writel(hsotg, dcfg,  DCFG);
   3447
   3448	/* Clear any pending OTG interrupts */
   3449	dwc2_writel(hsotg, 0xffffffff, GOTGINT);
   3450
   3451	/* Clear any pending interrupts */
   3452	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
   3453	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
   3454		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
   3455		GINTSTS_USBRST | GINTSTS_RESETDET |
   3456		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
   3457		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
   3458		GINTSTS_LPMTRANRCVD;
   3459
   3460	if (!using_desc_dma(hsotg))
   3461		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
   3462
   3463	if (!hsotg->params.external_id_pin_ctl)
   3464		intmsk |= GINTSTS_CONIDSTSCHNG;
   3465
   3466	dwc2_writel(hsotg, intmsk, GINTMSK);
   3467
   3468	if (using_dma(hsotg)) {
   3469		dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
   3470			    hsotg->params.ahbcfg,
   3471			    GAHBCFG);
   3472
   3473		/* Set DDMA mode support in the core if needed */
   3474		if (using_desc_dma(hsotg))
   3475			dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
   3476
   3477	} else {
   3478		dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
   3479						(GAHBCFG_NP_TXF_EMP_LVL |
   3480						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
   3481			    GAHBCFG_GLBL_INTR_EN, GAHBCFG);
   3482	}
   3483
   3484	/*
   3485	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
    3486	 * when we have no data to transfer. Otherwise we get flooded with
   3487	 * interrupts.
   3488	 */
   3489
   3490	dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
   3491		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
   3492		DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
   3493		DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
   3494		DIEPMSK);
   3495
   3496	/*
   3497	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
   3498	 * DMA mode we may need this and StsPhseRcvd.
   3499	 */
   3500	dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
   3501		DOEPMSK_STSPHSERCVDMSK) : 0) |
   3502		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
   3503		DOEPMSK_SETUPMSK,
   3504		DOEPMSK);
   3505
   3506	/* Enable BNA interrupt for DDMA */
   3507	if (using_desc_dma(hsotg)) {
   3508		dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
   3509		dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
   3510	}
   3511
   3512	/* Enable Service Interval mode if supported */
   3513	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
   3514		dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
   3515
   3516	dwc2_writel(hsotg, 0, DAINTMSK);
   3517
   3518	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
   3519		dwc2_readl(hsotg, DIEPCTL0),
   3520		dwc2_readl(hsotg, DOEPCTL0));
   3521
   3522	/* enable in and out endpoint interrupts */
   3523	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
   3524
   3525	/*
   3526	 * Enable the RXFIFO when in slave mode, as this is how we collect
   3527	 * the data. In DMA mode, we get events from the FIFO but also
   3528	 * things we cannot process, so do not use it.
   3529	 */
   3530	if (!using_dma(hsotg))
   3531		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
   3532
   3533	/* Enable interrupts for EP0 in and out */
   3534	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
   3535	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
   3536
   3537	if (!is_usb_reset) {
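        		/*
        		 * Pulse "power-on programming done" so the core starts
        		 * device operation once setup is complete.
        		 */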
   3538		dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
   3539		udelay(10);  /* see openiboot */
   3540		dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
   3541	}
   3542
   3543	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
   3544
   3545	/*
   3546	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
    3547	 * writing to the EPCTL register.
   3548	 */
   3549
    3550	/* set to read 1 8-byte packet */
   3551	dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
   3552	       DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
   3553
   3554	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
   3555	       DXEPCTL_CNAK | DXEPCTL_EPENA |
   3556	       DXEPCTL_USBACTEP,
   3557	       DOEPCTL0);
   3558
   3559	/* enable, but don't activate EP0in */
   3560	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
   3561	       DXEPCTL_USBACTEP, DIEPCTL0);
   3562
   3563	/* clear global NAKs */
   3564	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
   3565	if (!is_usb_reset)
   3566		val |= DCTL_SFTDISCON;
   3567	dwc2_set_bit(hsotg, DCTL, val);
   3568
   3569	/* configure the core to support LPM */
   3570	dwc2_gadget_init_lpm(hsotg);
   3571
   3572	/* program GREFCLK register if needed */
   3573	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
   3574		dwc2_gadget_program_ref_clk(hsotg);
   3575
    3576	/* must be at least 3ms to allow the bus to see the disconnect */
   3577	mdelay(3);
   3578
   3579	hsotg->lx_state = DWC2_L0;
   3580
   3581	dwc2_hsotg_enqueue_setup(hsotg);
   3582
   3583	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
   3584		dwc2_readl(hsotg, DIEPCTL0),
   3585		dwc2_readl(hsotg, DOEPCTL0));
   3586}
   3587
   3588void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
   3589{
   3590	/* set the soft-disconnect bit */
   3591	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
   3592}
   3593
   3594void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
   3595{
   3596	/* remove the soft-disconnect and let's go */
   3597	dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
   3598}
   3599
   3600/**
   3601 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
    3602 * @hsotg: The device state.
   3603 *
   3604 * This interrupt indicates one of the following conditions occurred while
   3605 * transmitting an ISOC transaction.
   3606 * - Corrupted IN Token for ISOC EP.
   3607 * - Packet not complete in FIFO.
   3608 *
   3609 * The following actions will be taken:
   3610 * - Determine the EP
   3611 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
   3612 */
   3613static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
   3614{
   3615	struct dwc2_hsotg_ep *hs_ep;
   3616	u32 epctrl;
   3617	u32 daintmsk;
   3618	u32 idx;
   3619
   3620	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
   3621
   3622	daintmsk = dwc2_readl(hsotg, DAINTMSK);
   3623
   3624	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
   3625		hs_ep = hsotg->eps_in[idx];
    3626		/* Process only unmasked ISOC EPs */
   3627		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
   3628			continue;
   3629
   3630		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
   3631		if ((epctrl & DXEPCTL_EPENA) &&
   3632		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
   3633			epctrl |= DXEPCTL_SNAK;
   3634			epctrl |= DXEPCTL_EPDIS;
   3635			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
   3636		}
   3637	}
   3638
   3639	/* Clear interrupt */
   3640	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
   3641}
   3642
   3643/**
   3644 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
    3645 * @hsotg: The device state.
   3646 *
   3647 * This interrupt indicates one of the following conditions occurred while
   3648 * transmitting an ISOC transaction.
   3649 * - Corrupted OUT Token for ISOC EP.
   3650 * - Packet not complete in FIFO.
   3651 *
   3652 * The following actions will be taken:
   3653 * - Determine the EP
   3654 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
   3655 */
   3656static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
   3657{
   3658	u32 gintsts;
   3659	u32 gintmsk;
   3660	u32 daintmsk;
   3661	u32 epctrl;
   3662	struct dwc2_hsotg_ep *hs_ep;
   3663	int idx;
   3664
   3665	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
   3666
   3667	daintmsk = dwc2_readl(hsotg, DAINTMSK);
   3668	daintmsk >>= DAINT_OUTEP_SHIFT;
   3669
   3670	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
   3671		hs_ep = hsotg->eps_out[idx];
   3672		/* Proceed only unmasked ISOC EPs */
    3673		/* Process only unmasked ISOC EPs */
   3674			continue;
   3675
   3676		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
   3677		if ((epctrl & DXEPCTL_EPENA) &&
   3678		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
   3679			/* Unmask GOUTNAKEFF interrupt */
   3680			gintmsk = dwc2_readl(hsotg, GINTMSK);
   3681			gintmsk |= GINTSTS_GOUTNAKEFF;
   3682			dwc2_writel(hsotg, gintmsk, GINTMSK);
   3683
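        			/*
        			 * Set a global OUT NAK only if one is not already in
        			 * effect; the GOUTNAKEFF handler then disables the EP.
        			 */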
   3684			gintsts = dwc2_readl(hsotg, GINTSTS);
   3685			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
   3686				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
   3687				break;
   3688			}
   3689		}
   3690	}
   3691
   3692	/* Clear interrupt */
   3693	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
   3694}
   3695
   3696/**
   3697 * dwc2_hsotg_irq - handle device interrupt
   3698 * @irq: The IRQ number triggered
    3699 * @pw: The private data pointer supplied when the handler was registered.
   3700 */
   3701static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
   3702{
   3703	struct dwc2_hsotg *hsotg = pw;
   3704	int retry_count = 8;
   3705	u32 gintsts;
   3706	u32 gintmsk;
   3707
   3708	if (!dwc2_is_device_mode(hsotg))
   3709		return IRQ_NONE;
   3710
   3711	spin_lock(&hsotg->lock);
   3712irq_retry:
   3713	gintsts = dwc2_readl(hsotg, GINTSTS);
   3714	gintmsk = dwc2_readl(hsotg, GINTMSK);
   3715
   3716	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
   3717		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
   3718
   3719	gintsts &= gintmsk;
   3720
   3721	if (gintsts & GINTSTS_RESETDET) {
   3722		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
   3723
   3724		dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
   3725
   3726		/* This event must be used only if controller is suspended */
   3727		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
   3728			dwc2_exit_partial_power_down(hsotg, 0, true);
   3729
   3730		hsotg->lx_state = DWC2_L0;
   3731	}
   3732
   3733	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
   3734		u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
   3735		u32 connected = hsotg->connected;
   3736
   3737		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
   3738		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
   3739			dwc2_readl(hsotg, GNPTXSTS));
   3740
   3741		dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
   3742
   3743		/* Report disconnection if it is not already done. */
   3744		dwc2_hsotg_disconnect(hsotg);
   3745
   3746		/* Reset device address to zero */
   3747		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
   3748
   3749		if (usb_status & GOTGCTL_BSESVLD && connected)
   3750			dwc2_hsotg_core_init_disconnected(hsotg, true);
   3751	}
   3752
   3753	if (gintsts & GINTSTS_ENUMDONE) {
   3754		dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
   3755
   3756		dwc2_hsotg_irq_enumdone(hsotg);
   3757	}
   3758
   3759	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
   3760		u32 daint = dwc2_readl(hsotg, DAINT);
   3761		u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
   3762		u32 daint_out, daint_in;
   3763		int ep;
   3764
   3765		daint &= daintmsk;
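        		/*
        		 * DAINT carries the IN endpoint bits in [15:0] and the OUT
        		 * endpoint bits in [31:16]; split it into per-direction
        		 * bitmaps before walking the endpoints.
        		 */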
   3766		daint_out = daint >> DAINT_OUTEP_SHIFT;
   3767		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
   3768
   3769		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
   3770
   3771		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
   3772						ep++, daint_out >>= 1) {
   3773			if (daint_out & 1)
   3774				dwc2_hsotg_epint(hsotg, ep, 0);
   3775		}
   3776
   3777		for (ep = 0; ep < hsotg->num_of_eps  && daint_in;
   3778						ep++, daint_in >>= 1) {
   3779			if (daint_in & 1)
   3780				dwc2_hsotg_epint(hsotg, ep, 1);
   3781		}
   3782	}
   3783
   3784	/* check both FIFOs */
   3785
   3786	if (gintsts & GINTSTS_NPTXFEMP) {
   3787		dev_dbg(hsotg->dev, "NPTxFEmp\n");
   3788
   3789		/*
   3790		 * Disable the interrupt to stop it happening again
   3791		 * unless one of these endpoint routines decides that
   3792		 * it needs re-enabling
   3793		 */
   3794
   3795		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
   3796		dwc2_hsotg_irq_fifoempty(hsotg, false);
   3797	}
   3798
   3799	if (gintsts & GINTSTS_PTXFEMP) {
   3800		dev_dbg(hsotg->dev, "PTxFEmp\n");
   3801
   3802		/* See note in GINTSTS_NPTxFEmp */
   3803
   3804		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
   3805		dwc2_hsotg_irq_fifoempty(hsotg, true);
   3806	}
   3807
   3808	if (gintsts & GINTSTS_RXFLVL) {
   3809		/*
   3810		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
   3811		 * we need to retry dwc2_hsotg_handle_rx if this is still
   3812		 * set.
   3813		 */
   3814
   3815		dwc2_hsotg_handle_rx(hsotg);
   3816	}
   3817
   3818	if (gintsts & GINTSTS_ERLYSUSP) {
   3819		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
   3820		dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
   3821	}
   3822
   3823	/*
    3824	 * these next two seem to crop up occasionally, causing the core
    3825	 * to shut down the USB transfer, so try clearing them and logging
   3826	 * the occurrence.
   3827	 */
   3828
   3829	if (gintsts & GINTSTS_GOUTNAKEFF) {
   3830		u8 idx;
   3831		u32 epctrl;
   3832		u32 gintmsk;
   3833		u32 daintmsk;
   3834		struct dwc2_hsotg_ep *hs_ep;
   3835
   3836		daintmsk = dwc2_readl(hsotg, DAINTMSK);
   3837		daintmsk >>= DAINT_OUTEP_SHIFT;
   3838		/* Mask this interrupt */
   3839		gintmsk = dwc2_readl(hsotg, GINTMSK);
   3840		gintmsk &= ~GINTSTS_GOUTNAKEFF;
   3841		dwc2_writel(hsotg, gintmsk, GINTMSK);
   3842
   3843		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
   3844		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
   3845			hs_ep = hsotg->eps_out[idx];
    3846			/* Process only unmasked EPs */
   3847			if (BIT(idx) & ~daintmsk)
   3848				continue;
   3849
   3850			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
   3851
    3852			// ISOC EPs only
   3853			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
   3854				epctrl |= DXEPCTL_SNAK;
   3855				epctrl |= DXEPCTL_EPDIS;
   3856				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
   3857				continue;
   3858			}
   3859
    3860			// Non-ISOC EPs
   3861			if (hs_ep->halted) {
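        				/*
        				 * EPDIS appears to take effect only while
        				 * EPENA is set, so enable the EP if needed
        				 * before disabling it with STALL.
        				 */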
   3862				if (!(epctrl & DXEPCTL_EPENA))
   3863					epctrl |= DXEPCTL_EPENA;
   3864				epctrl |= DXEPCTL_EPDIS;
   3865				epctrl |= DXEPCTL_STALL;
   3866				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
   3867			}
   3868		}
   3869
   3870		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
   3871	}
   3872
   3873	if (gintsts & GINTSTS_GINNAKEFF) {
   3874		dev_info(hsotg->dev, "GINNakEff triggered\n");
   3875
   3876		dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
   3877
   3878		dwc2_hsotg_dump(hsotg);
   3879	}
   3880
   3881	if (gintsts & GINTSTS_INCOMPL_SOIN)
   3882		dwc2_gadget_handle_incomplete_isoc_in(hsotg);
   3883
   3884	if (gintsts & GINTSTS_INCOMPL_SOOUT)
   3885		dwc2_gadget_handle_incomplete_isoc_out(hsotg);
   3886
   3887	/*
   3888	 * if we've had fifo events, we should try and go around the
   3889	 * loop again to see if there's any point in returning yet.
   3890	 */
   3891
   3892	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
   3893		goto irq_retry;
   3894
    3895	/* Check WKUP_ALERT interrupt */
   3896	if (hsotg->params.service_interval)
   3897		dwc2_gadget_wkup_alert_handler(hsotg);
   3898
   3899	spin_unlock(&hsotg->lock);
   3900
   3901	return IRQ_HANDLED;
   3902}
   3903
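        /*
         * Stopping an active transfer: first make the core NAK the endpoint
         * (per-endpoint for dedicated/periodic IN FIFOs, via the global NAK
         * bits otherwise), then disable the endpoint and flush queued data.
         */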
   3904static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
   3905				   struct dwc2_hsotg_ep *hs_ep)
   3906{
   3907	u32 epctrl_reg;
   3908	u32 epint_reg;
   3909
   3910	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
   3911		DOEPCTL(hs_ep->index);
   3912	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
   3913		DOEPINT(hs_ep->index);
   3914
   3915	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
   3916		hs_ep->name);
   3917
   3918	if (hs_ep->dir_in) {
   3919		if (hsotg->dedicated_fifos || hs_ep->periodic) {
   3920			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
   3921			/* Wait for Nak effect */
   3922			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
   3923						    DXEPINT_INEPNAKEFF, 100))
   3924				dev_warn(hsotg->dev,
   3925					 "%s: timeout DIEPINT.NAKEFF\n",
   3926					 __func__);
   3927		} else {
   3928			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
   3929			/* Wait for Nak effect */
   3930			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
   3931						    GINTSTS_GINNAKEFF, 100))
   3932				dev_warn(hsotg->dev,
   3933					 "%s: timeout GINTSTS.GINNAKEFF\n",
   3934					 __func__);
   3935		}
   3936	} else {
   3937		/* Mask GINTSTS_GOUTNAKEFF interrupt */
   3938		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
   3939
   3940		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
   3941			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
   3942
   3943		if (!using_dma(hsotg)) {
   3944			/* Wait for GINTSTS_RXFLVL interrupt */
   3945			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
   3946						    GINTSTS_RXFLVL, 100)) {
   3947				dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
   3948					 __func__);
   3949			} else {
   3950				/*
   3951				 * Pop GLOBAL OUT NAK status packet from RxFIFO
   3952				 * to assert GOUTNAKEFF interrupt
   3953				 */
   3954				dwc2_readl(hsotg, GRXSTSP);
   3955			}
   3956		}
   3957
   3958		/* Wait for global nak to take effect */
   3959		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
   3960					    GINTSTS_GOUTNAKEFF, 100))
   3961			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
   3962				 __func__);
   3963	}
   3964
   3965	/* Disable ep */
   3966	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
   3967
   3968	/* Wait for ep to be disabled */
   3969	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
   3970		dev_warn(hsotg->dev,
   3971			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
   3972
   3973	/* Clear EPDISBLD interrupt */
   3974	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
   3975
   3976	if (hs_ep->dir_in) {
   3977		unsigned short fifo_index;
   3978
   3979		if (hsotg->dedicated_fifos || hs_ep->periodic)
   3980			fifo_index = hs_ep->fifo_index;
   3981		else
   3982			fifo_index = 0;
   3983
   3984		/* Flush TX FIFO */
   3985		dwc2_flush_tx_fifo(hsotg, fifo_index);
   3986
   3987		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
   3988		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
   3989			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
   3990
   3991	} else {
   3992		/* Remove global NAKs */
   3993		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
   3994	}
   3995}
   3996
   3997/**
   3998 * dwc2_hsotg_ep_enable - enable the given endpoint
    3999 * @ep: The USB endpoint to configure
   4000 * @desc: The USB endpoint descriptor to configure with.
   4001 *
   4002 * This is called from the USB gadget code's usb_ep_enable().
   4003 */
   4004static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
   4005				const struct usb_endpoint_descriptor *desc)
   4006{
   4007	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4008	struct dwc2_hsotg *hsotg = hs_ep->parent;
   4009	unsigned long flags;
   4010	unsigned int index = hs_ep->index;
   4011	u32 epctrl_reg;
   4012	u32 epctrl;
   4013	u32 mps;
   4014	u32 mc;
   4015	u32 mask;
   4016	unsigned int dir_in;
   4017	unsigned int i, val, size;
   4018	int ret = 0;
   4019	unsigned char ep_type;
   4020	int desc_num;
   4021
   4022	dev_dbg(hsotg->dev,
   4023		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
   4024		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
   4025		desc->wMaxPacketSize, desc->bInterval);
   4026
   4027	/* not to be called for EP0 */
   4028	if (index == 0) {
   4029		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
   4030		return -EINVAL;
   4031	}
   4032
   4033	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
   4034	if (dir_in != hs_ep->dir_in) {
   4035		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
   4036		return -EINVAL;
   4037	}
   4038
   4039	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
   4040	mps = usb_endpoint_maxp(desc);
   4041	mc = usb_endpoint_maxp_mult(desc);
   4042
    4043	/* ISOC IN in DDMA supports bInterval values up to 10 */
   4044	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
   4045	    dir_in && desc->bInterval > 10) {
   4046		dev_err(hsotg->dev,
   4047			"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
   4048		return -EINVAL;
   4049	}
   4050
   4051	/* High bandwidth ISOC OUT in DDMA not supported */
   4052	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
   4053	    !dir_in && mc > 1) {
   4054		dev_err(hsotg->dev,
   4055			"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
   4056		return -EINVAL;
   4057	}
   4058
   4059	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
   4060
   4061	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
   4062	epctrl = dwc2_readl(hsotg, epctrl_reg);
   4063
   4064	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
   4065		__func__, epctrl, epctrl_reg);
   4066
   4067	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
   4068		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
   4069	else
   4070		desc_num = MAX_DMA_DESC_NUM_GENERIC;
   4071
   4072	/* Allocate DMA descriptor chain for non-ctrl endpoints */
   4073	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
   4074		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
   4075			desc_num * sizeof(struct dwc2_dma_desc),
   4076			&hs_ep->desc_list_dma, GFP_ATOMIC);
   4077		if (!hs_ep->desc_list) {
   4078			ret = -ENOMEM;
   4079			goto error2;
   4080		}
   4081	}
   4082
   4083	spin_lock_irqsave(&hsotg->lock, flags);
   4084
   4085	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
   4086	epctrl |= DXEPCTL_MPS(mps);
   4087
   4088	/*
   4089	 * mark the endpoint as active, otherwise the core may ignore
   4090	 * transactions entirely for this endpoint
   4091	 */
   4092	epctrl |= DXEPCTL_USBACTEP;
   4093
   4094	/* update the endpoint state */
   4095	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
   4096
   4097	/* default, set to non-periodic */
   4098	hs_ep->isochronous = 0;
   4099	hs_ep->periodic = 0;
   4100	hs_ep->halted = 0;
   4101	hs_ep->wedged = 0;
   4102	hs_ep->interval = desc->bInterval;
   4103
   4104	switch (ep_type) {
   4105	case USB_ENDPOINT_XFER_ISOC:
   4106		epctrl |= DXEPCTL_EPTYPE_ISO;
   4107		epctrl |= DXEPCTL_SETEVENFR;
   4108		hs_ep->isochronous = 1;
   4109		hs_ep->interval = 1 << (desc->bInterval - 1);
   4110		hs_ep->target_frame = TARGET_FRAME_INITIAL;
   4111		hs_ep->next_desc = 0;
   4112		hs_ep->compl_desc = 0;
   4113		if (dir_in) {
   4114			hs_ep->periodic = 1;
   4115			mask = dwc2_readl(hsotg, DIEPMSK);
   4116			mask |= DIEPMSK_NAKMSK;
   4117			dwc2_writel(hsotg, mask, DIEPMSK);
   4118		} else {
   4119			epctrl |= DXEPCTL_SNAK;
   4120			mask = dwc2_readl(hsotg, DOEPMSK);
   4121			mask |= DOEPMSK_OUTTKNEPDISMSK;
   4122			dwc2_writel(hsotg, mask, DOEPMSK);
   4123		}
   4124		break;
   4125
   4126	case USB_ENDPOINT_XFER_BULK:
   4127		epctrl |= DXEPCTL_EPTYPE_BULK;
   4128		break;
   4129
   4130	case USB_ENDPOINT_XFER_INT:
   4131		if (dir_in)
   4132			hs_ep->periodic = 1;
   4133
   4134		if (hsotg->gadget.speed == USB_SPEED_HIGH)
   4135			hs_ep->interval = 1 << (desc->bInterval - 1);
   4136
   4137		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
   4138		break;
   4139
   4140	case USB_ENDPOINT_XFER_CONTROL:
   4141		epctrl |= DXEPCTL_EPTYPE_CONTROL;
   4142		break;
   4143	}
   4144
   4145	/*
   4146	 * if the hardware has dedicated fifos, we must give each IN EP
   4147	 * a unique tx-fifo even if it is non-periodic.
   4148	 */
   4149	if (dir_in && hsotg->dedicated_fifos) {
   4150		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
   4151		u32 fifo_index = 0;
   4152		u32 fifo_size = UINT_MAX;
   4153
   4154		size = hs_ep->ep.maxpacket * hs_ep->mc;
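        		/*
        		 * DPTXFSIZN reports the FIFO depth in 32-bit words, so
        		 * convert to bytes before comparing with the largest
        		 * transaction this endpoint can generate.
        		 */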
   4155		for (i = 1; i <= fifo_count; ++i) {
   4156			if (hsotg->fifo_map & (1 << i))
   4157				continue;
   4158			val = dwc2_readl(hsotg, DPTXFSIZN(i));
   4159			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
   4160			if (val < size)
   4161				continue;
   4162			/* Search for smallest acceptable fifo */
   4163			if (val < fifo_size) {
   4164				fifo_size = val;
   4165				fifo_index = i;
   4166			}
   4167		}
   4168		if (!fifo_index) {
   4169			dev_err(hsotg->dev,
   4170				"%s: No suitable fifo found\n", __func__);
   4171			ret = -ENOMEM;
   4172			goto error1;
   4173		}
   4174		epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
   4175		hsotg->fifo_map |= 1 << fifo_index;
   4176		epctrl |= DXEPCTL_TXFNUM(fifo_index);
   4177		hs_ep->fifo_index = fifo_index;
   4178		hs_ep->fifo_size = fifo_size;
   4179	}
   4180
   4181	/* for non control endpoints, set PID to D0 */
   4182	if (index && !hs_ep->isochronous)
   4183		epctrl |= DXEPCTL_SETD0PID;
   4184
   4185	/* WA for Full speed ISOC IN in DDMA mode.
    4186	 * By clearing the NAK status of the EP, the core will send a ZLP
    4187	 * in response to an IN token and assert the NAK interrupt, relying
    4188	 * on TxFIFO status only.
   4189	 */
   4190
   4191	if (hsotg->gadget.speed == USB_SPEED_FULL &&
   4192	    hs_ep->isochronous && dir_in) {
   4193		/* The WA applies only to core versions from 2.72a
   4194		 * to 4.00a (including both). Also for FS_IOT_1.00a
   4195		 * and HS_IOT_1.00a.
   4196		 */
   4197		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
   4198
   4199		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
   4200		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
   4201		     gsnpsid == DWC2_FS_IOT_REV_1_00a ||
   4202		     gsnpsid == DWC2_HS_IOT_REV_1_00a)
   4203			epctrl |= DXEPCTL_CNAK;
   4204	}
   4205
   4206	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
   4207		__func__, epctrl);
   4208
   4209	dwc2_writel(hsotg, epctrl, epctrl_reg);
   4210	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
   4211		__func__, dwc2_readl(hsotg, epctrl_reg));
   4212
   4213	/* enable the endpoint interrupt */
   4214	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
   4215
   4216error1:
   4217	spin_unlock_irqrestore(&hsotg->lock, flags);
   4218
   4219error2:
   4220	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
   4221		dmam_free_coherent(hsotg->dev, desc_num *
   4222			sizeof(struct dwc2_dma_desc),
   4223			hs_ep->desc_list, hs_ep->desc_list_dma);
   4224		hs_ep->desc_list = NULL;
   4225	}
   4226
   4227	return ret;
   4228}
   4229
   4230/**
   4231 * dwc2_hsotg_ep_disable - disable given endpoint
   4232 * @ep: The endpoint to disable.
   4233 */
   4234static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
   4235{
   4236	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4237	struct dwc2_hsotg *hsotg = hs_ep->parent;
   4238	int dir_in = hs_ep->dir_in;
   4239	int index = hs_ep->index;
   4240	u32 epctrl_reg;
   4241	u32 ctrl;
   4242
   4243	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
   4244
   4245	if (ep == &hsotg->eps_out[0]->ep) {
   4246		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
   4247		return -EINVAL;
   4248	}
   4249
   4250	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
   4251		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
   4252		return -EINVAL;
   4253	}
   4254
   4255	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
   4256
   4257	ctrl = dwc2_readl(hsotg, epctrl_reg);
   4258
   4259	if (ctrl & DXEPCTL_EPENA)
   4260		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
   4261
   4262	ctrl &= ~DXEPCTL_EPENA;
   4263	ctrl &= ~DXEPCTL_USBACTEP;
   4264	ctrl |= DXEPCTL_SNAK;
   4265
   4266	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
   4267	dwc2_writel(hsotg, ctrl, epctrl_reg);
   4268
   4269	/* disable endpoint interrupts */
   4270	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
   4271
   4272	/* terminate all requests with shutdown */
   4273	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
   4274
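        	/* release the TxFIFO claimed in dwc2_hsotg_ep_enable() */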
   4275	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
   4276	hs_ep->fifo_index = 0;
   4277	hs_ep->fifo_size = 0;
   4278
   4279	return 0;
   4280}
   4281
   4282static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
   4283{
   4284	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4285	struct dwc2_hsotg *hsotg = hs_ep->parent;
   4286	unsigned long flags;
   4287	int ret;
   4288
   4289	spin_lock_irqsave(&hsotg->lock, flags);
   4290	ret = dwc2_hsotg_ep_disable(ep);
   4291	spin_unlock_irqrestore(&hsotg->lock, flags);
   4292	return ret;
   4293}
   4294
   4295/**
    4296 * on_list - check whether a request is on the given endpoint's queue
   4297 * @ep: The endpoint to check.
   4298 * @test: The request to test if it is on the endpoint.
   4299 */
   4300static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
   4301{
   4302	struct dwc2_hsotg_req *req, *treq;
   4303
   4304	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
   4305		if (req == test)
   4306			return true;
   4307	}
   4308
   4309	return false;
   4310}
   4311
   4312/**
    4313 * dwc2_hsotg_ep_dequeue - dequeue the given request from an endpoint
   4314 * @ep: The endpoint to dequeue.
   4315 * @req: The request to be removed from a queue.
   4316 */
   4317static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
   4318{
   4319	struct dwc2_hsotg_req *hs_req = our_req(req);
   4320	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4321	struct dwc2_hsotg *hs = hs_ep->parent;
   4322	unsigned long flags;
   4323
   4324	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
   4325
   4326	spin_lock_irqsave(&hs->lock, flags);
   4327
   4328	if (!on_list(hs_ep, hs_req)) {
   4329		spin_unlock_irqrestore(&hs->lock, flags);
   4330		return -EINVAL;
   4331	}
   4332
   4333	/* Dequeue already started request */
   4334	if (req == &hs_ep->req->req)
   4335		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
   4336
   4337	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
   4338	spin_unlock_irqrestore(&hs->lock, flags);
   4339
   4340	return 0;
   4341}
   4342
   4343/**
   4344 * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
   4345 * @ep: The endpoint to be wedged.
   4346 *
   4347 */
   4348static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
   4349{
   4350	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4351	struct dwc2_hsotg *hs = hs_ep->parent;
   4352
   4353	unsigned long	flags;
   4354	int		ret;
   4355
   4356	spin_lock_irqsave(&hs->lock, flags);
   4357	hs_ep->wedged = 1;
   4358	ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
   4359	spin_unlock_irqrestore(&hs->lock, flags);
   4360
   4361	return ret;
   4362}
   4363
   4364/**
   4365 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
   4366 * @ep: The endpoint to set halt.
   4367 * @value: Set or unset the halt.
   4368 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
   4369 *       the endpoint is busy processing requests.
   4370 *
    4371 * We need to stall the endpoint immediately if the request comes from the
    4372 * SetFeature protocol command handler.
   4373 */
   4374static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
   4375{
   4376	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4377	struct dwc2_hsotg *hs = hs_ep->parent;
   4378	int index = hs_ep->index;
   4379	u32 epreg;
   4380	u32 epctl;
   4381	u32 xfertype;
   4382
   4383	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
   4384
   4385	if (index == 0) {
   4386		if (value)
   4387			dwc2_hsotg_stall_ep0(hs);
   4388		else
   4389			dev_warn(hs->dev,
   4390				 "%s: can't clear halt on ep0\n", __func__);
   4391		return 0;
   4392	}
   4393
   4394	if (hs_ep->isochronous) {
   4395		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
   4396		return -EINVAL;
   4397	}
   4398
   4399	if (!now && value && !list_empty(&hs_ep->queue)) {
   4400		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
   4401			ep->name);
   4402		return -EAGAIN;
   4403	}
   4404
   4405	if (hs_ep->dir_in) {
   4406		epreg = DIEPCTL(index);
   4407		epctl = dwc2_readl(hs, epreg);
   4408
   4409		if (value) {
   4410			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
   4411			if (epctl & DXEPCTL_EPENA)
   4412				epctl |= DXEPCTL_EPDIS;
   4413		} else {
   4414			epctl &= ~DXEPCTL_STALL;
   4415			hs_ep->wedged = 0;
   4416			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
   4417			if (xfertype == DXEPCTL_EPTYPE_BULK ||
   4418			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
   4419				epctl |= DXEPCTL_SETD0PID;
   4420		}
   4421		dwc2_writel(hs, epctl, epreg);
   4422	} else {
   4423		epreg = DOEPCTL(index);
   4424		epctl = dwc2_readl(hs, epreg);
   4425
   4426		if (value) {
   4427			/* Unmask GOUTNAKEFF interrupt */
   4428			dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
   4429
   4430			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
   4431				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
   4432			// STALL bit will be set in GOUTNAKEFF interrupt handler
   4433		} else {
   4434			epctl &= ~DXEPCTL_STALL;
   4435			hs_ep->wedged = 0;
   4436			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
   4437			if (xfertype == DXEPCTL_EPTYPE_BULK ||
   4438			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
   4439				epctl |= DXEPCTL_SETD0PID;
   4440			dwc2_writel(hs, epctl, epreg);
   4441		}
   4442	}
   4443
   4444	hs_ep->halted = value;
   4445	return 0;
   4446}
   4447
   4448/**
   4449 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
   4450 * @ep: The endpoint to set halt.
   4451 * @value: Set or unset the halt.
   4452 */
   4453static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
   4454{
   4455	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
   4456	struct dwc2_hsotg *hs = hs_ep->parent;
   4457	unsigned long flags;
   4458	int ret;
   4459
   4460	spin_lock_irqsave(&hs->lock, flags);
   4461	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
   4462	spin_unlock_irqrestore(&hs->lock, flags);
   4463
   4464	return ret;
   4465}
   4466
   4467static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
   4468	.enable		= dwc2_hsotg_ep_enable,
   4469	.disable	= dwc2_hsotg_ep_disable_lock,
   4470	.alloc_request	= dwc2_hsotg_ep_alloc_request,
   4471	.free_request	= dwc2_hsotg_ep_free_request,
   4472	.queue		= dwc2_hsotg_ep_queue_lock,
   4473	.dequeue	= dwc2_hsotg_ep_dequeue,
   4474	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
   4475	.set_wedge	= dwc2_gadget_ep_set_wedge,
   4476	/* note, don't believe we have any call for the fifo routines */
   4477};
   4478
   4479/**
   4480 * dwc2_hsotg_init - initialize the usb core
   4481 * @hsotg: The driver state
   4482 */
   4483static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
   4484{
   4485	/* unmask subset of endpoint interrupts */
   4486
   4487	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
   4488		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
   4489		    DIEPMSK);
   4490
   4491	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
   4492		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
   4493		    DOEPMSK);
   4494
   4495	dwc2_writel(hsotg, 0, DAINTMSK);
   4496
   4497	/* Be in disconnected state until gadget is registered */
   4498	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
   4499
   4500	/* setup fifos */
   4501
   4502	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
   4503		dwc2_readl(hsotg, GRXFSIZ),
   4504		dwc2_readl(hsotg, GNPTXFSIZ));
   4505
   4506	dwc2_hsotg_init_fifo(hsotg);
   4507
   4508	if (using_dma(hsotg))
   4509		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
   4510}
   4511
   4512/**
   4513 * dwc2_hsotg_udc_start - prepare the udc for work
   4514 * @gadget: The usb gadget state
   4515 * @driver: The usb gadget driver
   4516 *
    4517 * Perform initialization to prepare the udc device and driver
    4518 * for operation.
   4519 */
   4520static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
   4521				struct usb_gadget_driver *driver)
   4522{
   4523	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4524	unsigned long flags;
   4525	int ret;
   4526
   4527	if (!hsotg) {
   4528		pr_err("%s: called with no device\n", __func__);
   4529		return -ENODEV;
   4530	}
   4531
   4532	if (!driver) {
   4533		dev_err(hsotg->dev, "%s: no driver\n", __func__);
   4534		return -EINVAL;
   4535	}
   4536
   4537	if (driver->max_speed < USB_SPEED_FULL)
   4538		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
   4539
   4540	if (!driver->setup) {
   4541		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
   4542		return -EINVAL;
   4543	}
   4544
   4545	WARN_ON(hsotg->driver);
   4546
   4547	hsotg->driver = driver;
   4548	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
   4549	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
   4550
   4551	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
   4552		ret = dwc2_lowlevel_hw_enable(hsotg);
   4553		if (ret)
   4554			goto err;
   4555	}
   4556
   4557	if (!IS_ERR_OR_NULL(hsotg->uphy))
   4558		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
   4559
   4560	spin_lock_irqsave(&hsotg->lock, flags);
   4561	if (dwc2_hw_is_device(hsotg)) {
   4562		dwc2_hsotg_init(hsotg);
   4563		dwc2_hsotg_core_init_disconnected(hsotg, false);
   4564	}
   4565
   4566	hsotg->enabled = 0;
   4567	spin_unlock_irqrestore(&hsotg->lock, flags);
   4568
   4569	gadget->sg_supported = using_desc_dma(hsotg);
   4570	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
   4571
   4572	return 0;
   4573
   4574err:
   4575	hsotg->driver = NULL;
   4576	return ret;
   4577}
   4578
   4579/**
   4580 * dwc2_hsotg_udc_stop - stop the udc
   4581 * @gadget: The usb gadget state
   4582 *
    4583 * Stop the udc hw block and stand by for future transmissions
   4584 */
   4585static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
   4586{
   4587	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4588	unsigned long flags;
   4589	int ep;
   4590
   4591	if (!hsotg)
   4592		return -ENODEV;
   4593
   4594	/* all endpoints should be shutdown */
   4595	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
   4596		if (hsotg->eps_in[ep])
   4597			dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
   4598		if (hsotg->eps_out[ep])
   4599			dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
   4600	}
   4601
   4602	spin_lock_irqsave(&hsotg->lock, flags);
   4603
   4604	hsotg->driver = NULL;
   4605	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
   4606	hsotg->enabled = 0;
   4607
   4608	spin_unlock_irqrestore(&hsotg->lock, flags);
   4609
   4610	if (!IS_ERR_OR_NULL(hsotg->uphy))
   4611		otg_set_peripheral(hsotg->uphy->otg, NULL);
   4612
   4613	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
   4614		dwc2_lowlevel_hw_disable(hsotg);
   4615
   4616	return 0;
   4617}
   4618
   4619/**
   4620 * dwc2_hsotg_gadget_getframe - read the frame number
   4621 * @gadget: The usb gadget state
   4622 *
   4623 * Read the {micro} frame number
   4624 */
   4625static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
   4626{
   4627	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
   4628}
   4629
   4630/**
   4631 * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
   4632 * @gadget: The usb gadget state
   4633 * @is_selfpowered: Whether the device is self-powered
   4634 *
   4635 * Set if the device is self or bus powered.
   4636 */
   4637static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
   4638				      int is_selfpowered)
   4639{
   4640	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4641	unsigned long flags;
   4642
   4643	spin_lock_irqsave(&hsotg->lock, flags);
   4644	gadget->is_selfpowered = !!is_selfpowered;
   4645	spin_unlock_irqrestore(&hsotg->lock, flags);
   4646
   4647	return 0;
   4648}
   4649
   4650/**
   4651 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
   4652 * @gadget: The usb gadget state
    4653 * @is_on: Whether the pullup should be connected
   4654 *
   4655 * Connect/Disconnect the USB PHY pullup
   4656 */
   4657static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
   4658{
   4659	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4660	unsigned long flags;
   4661
   4662	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
   4663		hsotg->op_state);
   4664
   4665	/* Don't modify pullup state while in host mode */
   4666	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
   4667		hsotg->enabled = is_on;
   4668		return 0;
   4669	}
   4670
   4671	spin_lock_irqsave(&hsotg->lock, flags);
   4672	if (is_on) {
   4673		hsotg->enabled = 1;
   4674		dwc2_hsotg_core_init_disconnected(hsotg, false);
    4675		/* Enable ACG feature in device mode, if supported */
   4676		dwc2_enable_acg(hsotg);
   4677		dwc2_hsotg_core_connect(hsotg);
   4678	} else {
   4679		dwc2_hsotg_core_disconnect(hsotg);
   4680		dwc2_hsotg_disconnect(hsotg);
   4681		hsotg->enabled = 0;
   4682	}
   4683
   4684	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
   4685	spin_unlock_irqrestore(&hsotg->lock, flags);
   4686
   4687	return 0;
   4688}
   4689
   4690static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
   4691{
   4692	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4693	unsigned long flags;
   4694
   4695	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
   4696	spin_lock_irqsave(&hsotg->lock, flags);
   4697
   4698	/*
   4699	 * If controller is in partial power down state, it must exit from
   4700	 * that state before being initialized / de-initialized
   4701	 */
   4702	if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
   4703		/*
   4704		 * No need to check the return value as
   4705		 * registers are not being restored.
   4706		 */
   4707		dwc2_exit_partial_power_down(hsotg, 0, false);
   4708
   4709	if (is_active) {
   4710		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
   4711
   4712		dwc2_hsotg_core_init_disconnected(hsotg, false);
   4713		if (hsotg->enabled) {
    4714			/* Enable ACG feature in device mode, if supported */
   4715			dwc2_enable_acg(hsotg);
   4716			dwc2_hsotg_core_connect(hsotg);
   4717		}
   4718	} else {
   4719		dwc2_hsotg_core_disconnect(hsotg);
   4720		dwc2_hsotg_disconnect(hsotg);
   4721	}
   4722
   4723	spin_unlock_irqrestore(&hsotg->lock, flags);
   4724	return 0;
   4725}
   4726
   4727/**
   4728 * dwc2_hsotg_vbus_draw - report bMaxPower field
   4729 * @gadget: The usb gadget state
   4730 * @mA: Amount of current
   4731 *
    4732 * Report to the phy how much power the device may consume.
   4733 */
   4734static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
   4735{
   4736	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
   4737
   4738	if (IS_ERR_OR_NULL(hsotg->uphy))
   4739		return -ENOTSUPP;
   4740	return usb_phy_set_power(hsotg->uphy, mA);
   4741}
   4742
   4743static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
   4744{
   4745	struct dwc2_hsotg *hsotg = to_hsotg(g);
   4746	unsigned long		flags;
   4747
   4748	spin_lock_irqsave(&hsotg->lock, flags);
   4749	switch (speed) {
   4750	case USB_SPEED_HIGH:
   4751		hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
   4752		break;
   4753	case USB_SPEED_FULL:
   4754		hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
   4755		break;
   4756	case USB_SPEED_LOW:
   4757		hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
   4758		break;
   4759	default:
   4760		dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
   4761	}
   4762	spin_unlock_irqrestore(&hsotg->lock, flags);
   4763}
   4764
   4765static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
   4766	.get_frame	= dwc2_hsotg_gadget_getframe,
   4767	.set_selfpowered	= dwc2_hsotg_set_selfpowered,
   4768	.udc_start		= dwc2_hsotg_udc_start,
   4769	.udc_stop		= dwc2_hsotg_udc_stop,
   4770	.pullup                 = dwc2_hsotg_pullup,
   4771	.udc_set_speed		= dwc2_gadget_set_speed,
   4772	.vbus_session		= dwc2_hsotg_vbus_session,
   4773	.vbus_draw		= dwc2_hsotg_vbus_draw,
   4774};
   4775
   4776/**
   4777 * dwc2_hsotg_initep - initialise a single endpoint
   4778 * @hsotg: The device state.
   4779 * @hs_ep: The endpoint to be initialised.
   4780 * @epnum: The endpoint number
   4781 * @dir_in: True if direction is in.
   4782 *
   4783 * Initialise the given endpoint (as part of the probe and device state
   4784 * creation) to give to the gadget driver. Setup the endpoint name, any
   4785 * direction information and other state that may be required.
   4786 */
   4787static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
   4788			      struct dwc2_hsotg_ep *hs_ep,
   4789				       int epnum,
   4790				       bool dir_in)
   4791{
   4792	char *dir;
   4793
   4794	if (epnum == 0)
   4795		dir = "";
   4796	else if (dir_in)
   4797		dir = "in";
   4798	else
   4799		dir = "out";
   4800
   4801	hs_ep->dir_in = dir_in;
   4802	hs_ep->index = epnum;
   4803
   4804	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
   4805
   4806	INIT_LIST_HEAD(&hs_ep->queue);
   4807	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
   4808
   4809	/* add to the list of endpoints known by the gadget driver */
   4810	if (epnum)
   4811		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
   4812
   4813	hs_ep->parent = hsotg;
   4814	hs_ep->ep.name = hs_ep->name;
   4815
   4816	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
   4817		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
   4818	else
   4819		usb_ep_set_maxpacket_limit(&hs_ep->ep,
   4820					   epnum ? 1024 : EP0_MPS_LIMIT);
   4821	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
   4822
   4823	if (epnum == 0) {
   4824		hs_ep->ep.caps.type_control = true;
   4825	} else {
   4826		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
   4827			hs_ep->ep.caps.type_iso = true;
   4828			hs_ep->ep.caps.type_bulk = true;
   4829		}
   4830		hs_ep->ep.caps.type_int = true;
   4831	}
   4832
   4833	if (dir_in)
   4834		hs_ep->ep.caps.dir_in = true;
   4835	else
   4836		hs_ep->ep.caps.dir_out = true;
   4837
   4838	/*
   4839	 * if we're using dma, we need to set the next-endpoint pointer
   4840	 * to be something valid.
   4841	 */
   4842
   4843	if (using_dma(hsotg)) {
   4844		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
   4845
   4846		if (dir_in)
   4847			dwc2_writel(hsotg, next, DIEPCTL(epnum));
   4848		else
   4849			dwc2_writel(hsotg, next, DOEPCTL(epnum));
   4850	}
   4851}
   4852
   4853/**
   4854 * dwc2_hsotg_hw_cfg - read HW configuration registers
   4855 * @hsotg: Programming view of the DWC_otg controller
   4856 *
   4857 * Read the USB core HW configuration registers
   4858 */
   4859static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
   4860{
   4861	u32 cfg;
   4862	u32 ep_type;
   4863	u32 i;
   4864
   4865	/* check hardware configuration */
   4866
   4867	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
   4868
   4869	/* Add ep0 */
   4870	hsotg->num_of_eps++;
   4871
   4872	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
   4873					sizeof(struct dwc2_hsotg_ep),
   4874					GFP_KERNEL);
   4875	if (!hsotg->eps_in[0])
   4876		return -ENOMEM;
   4877	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
   4878	hsotg->eps_out[0] = hsotg->eps_in[0];
   4879
   4880	cfg = hsotg->hw_params.dev_ep_dirs;
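        	/*
        	 * dev_ep_dirs encodes each endpoint in 2 bits (0 = bidirectional,
        	 * 1 = IN only, 2 = OUT only, following GHWCFG1), so allocate
        	 * state for each direction the endpoint supports.
        	 */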
   4881	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
   4882		ep_type = cfg & 3;
   4883		/* Direction in or both */
   4884		if (!(ep_type & 2)) {
   4885			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
   4886				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
   4887			if (!hsotg->eps_in[i])
   4888				return -ENOMEM;
   4889		}
   4890		/* Direction out or both */
   4891		if (!(ep_type & 1)) {
   4892			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
   4893				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
   4894			if (!hsotg->eps_out[i])
   4895				return -ENOMEM;
   4896		}
   4897	}
   4898
   4899	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
   4900	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
   4901
   4902	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
   4903		 hsotg->num_of_eps,
   4904		 hsotg->dedicated_fifos ? "dedicated" : "shared",
   4905		 hsotg->fifo_mem);
   4906	return 0;
   4907}
   4908
   4909/**
   4910 * dwc2_hsotg_dump - dump state of the udc
   4911 * @hsotg: Programming view of the DWC_otg controller
   4912 *
   4913 */
   4914static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
   4915{
   4916#ifdef DEBUG
   4917	struct device *dev = hsotg->dev;
   4918	u32 val;
   4919	int idx;
   4920
   4921	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
   4922		 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
   4923		 dwc2_readl(hsotg, DIEPMSK));
   4924
   4925	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
   4926		 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
   4927
   4928	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
   4929		 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
   4930
   4931	/* show periodic fifo settings */
   4932
   4933	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
   4934		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
   4935		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
   4936			 val >> FIFOSIZE_DEPTH_SHIFT,
   4937			 val & FIFOSIZE_STARTADDR_MASK);
   4938	}
   4939
   4940	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
   4941		dev_info(dev,
   4942			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
   4943			 dwc2_readl(hsotg, DIEPCTL(idx)),
   4944			 dwc2_readl(hsotg, DIEPTSIZ(idx)),
   4945			 dwc2_readl(hsotg, DIEPDMA(idx)));
   4946
   4947		val = dwc2_readl(hsotg, DOEPCTL(idx));
   4948		dev_info(dev,
   4949			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
   4950			 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
   4951			 dwc2_readl(hsotg, DOEPTSIZ(idx)),
   4952			 dwc2_readl(hsotg, DOEPDMA(idx)));
   4953	}
   4954
   4955	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
   4956		 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
   4957#endif
   4958}
   4959
   4960/**
   4961 * dwc2_gadget_init - init function for gadget
   4962 * @hsotg: Programming view of the DWC_otg controller
   4963 *
   4964 */
   4965int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
   4966{
   4967	struct device *dev = hsotg->dev;
   4968	int epnum;
   4969	int ret;
   4970
   4971	/* Dump fifo information */
   4972	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
   4973		hsotg->params.g_np_tx_fifo_size);
   4974	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
   4975
   4976	switch (hsotg->params.speed) {
   4977	case DWC2_SPEED_PARAM_LOW:
   4978		hsotg->gadget.max_speed = USB_SPEED_LOW;
   4979		break;
   4980	case DWC2_SPEED_PARAM_FULL:
   4981		hsotg->gadget.max_speed = USB_SPEED_FULL;
   4982		break;
   4983	default:
   4984		hsotg->gadget.max_speed = USB_SPEED_HIGH;
   4985		break;
   4986	}
   4987
   4988	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
   4989	hsotg->gadget.name = dev_name(dev);
   4990	hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
   4991	hsotg->remote_wakeup_allowed = 0;
   4992
   4993	if (hsotg->params.lpm)
   4994		hsotg->gadget.lpm_capable = true;
   4995
   4996	if (hsotg->dr_mode == USB_DR_MODE_OTG)
   4997		hsotg->gadget.is_otg = 1;
   4998	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
   4999		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
   5000
   5001	ret = dwc2_hsotg_hw_cfg(hsotg);
   5002	if (ret) {
   5003		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
   5004		return ret;
   5005	}
   5006
   5007	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
   5008			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
   5009	if (!hsotg->ctrl_buff)
   5010		return -ENOMEM;
   5011
   5012	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
   5013			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
   5014	if (!hsotg->ep0_buff)
   5015		return -ENOMEM;
   5016
   5017	if (using_desc_dma(hsotg)) {
   5018		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
   5019		if (ret < 0)
   5020			return ret;
   5021	}
   5022
   5023	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
   5024			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
   5025	if (ret < 0) {
   5026		dev_err(dev, "cannot claim IRQ for gadget\n");
   5027		return ret;
   5028	}
   5029
    5030	/* hsotg->num_of_eps includes ep0, added in dwc2_hsotg_hw_cfg() */
   5031
   5032	if (hsotg->num_of_eps == 0) {
   5033		dev_err(dev, "wrong number of EPs (zero)\n");
   5034		return -EINVAL;
   5035	}
   5036
   5037	/* setup endpoint information */
   5038
   5039	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
   5040	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
   5041
   5042	/* allocate EP0 request */
   5043
   5044	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
   5045						     GFP_KERNEL);
   5046	if (!hsotg->ctrl_req) {
   5047		dev_err(dev, "failed to allocate ctrl req\n");
   5048		return -ENOMEM;
   5049	}
   5050
   5051	/* initialise the endpoints now the core has been initialised */
   5052	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
   5053		if (hsotg->eps_in[epnum])
   5054			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
   5055					  epnum, 1);
   5056		if (hsotg->eps_out[epnum])
   5057			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
   5058					  epnum, 0);
   5059	}
   5060
   5061	dwc2_hsotg_dump(hsotg);
   5062
   5063	return 0;
   5064}
   5065
   5066/**
   5067 * dwc2_hsotg_remove - remove function for hsotg driver
   5068 * @hsotg: Programming view of the DWC_otg controller
   5069 *
   5070 */
   5071int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
   5072{
   5073	usb_del_gadget_udc(&hsotg->gadget);
   5074	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
   5075
   5076	return 0;
   5077}
   5078
   5079int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
   5080{
   5081	unsigned long flags;
   5082
   5083	if (hsotg->lx_state != DWC2_L0)
   5084		return 0;
   5085
   5086	if (hsotg->driver) {
   5087		int ep;
   5088
   5089		dev_info(hsotg->dev, "suspending usb gadget %s\n",
   5090			 hsotg->driver->driver.name);
   5091
   5092		spin_lock_irqsave(&hsotg->lock, flags);
   5093		if (hsotg->enabled)
   5094			dwc2_hsotg_core_disconnect(hsotg);
   5095		dwc2_hsotg_disconnect(hsotg);
   5096		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
   5097		spin_unlock_irqrestore(&hsotg->lock, flags);
   5098
   5099		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
   5100			if (hsotg->eps_in[ep])
   5101				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
   5102			if (hsotg->eps_out[ep])
   5103				dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
   5104		}
   5105	}
   5106
   5107	return 0;
   5108}
   5109
   5110int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
   5111{
   5112	unsigned long flags;
   5113
   5114	if (hsotg->lx_state == DWC2_L2)
   5115		return 0;
   5116
   5117	if (hsotg->driver) {
   5118		dev_info(hsotg->dev, "resuming usb gadget %s\n",
   5119			 hsotg->driver->driver.name);
   5120
   5121		spin_lock_irqsave(&hsotg->lock, flags);
   5122		dwc2_hsotg_core_init_disconnected(hsotg, false);
   5123		if (hsotg->enabled) {
    5124			/* Enable ACG feature in device mode, if supported */
   5125			dwc2_enable_acg(hsotg);
   5126			dwc2_hsotg_core_connect(hsotg);
   5127		}
   5128		spin_unlock_irqrestore(&hsotg->lock, flags);
   5129	}
   5130
   5131	return 0;
   5132}
   5133
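/*
 * Minimal usage sketch, assuming a system-sleep wrapper in the platform
 * glue (the names below are hypothetical, not the driver's actual PM
 * callbacks): dwc2_hsotg_suspend()/dwc2_hsotg_resume() are meant to be
 * called from dev_pm_ops, roughly like this.
 */
static int __maybe_unused dwc2_example_pm_suspend(struct device *dev)
{
	struct dwc2_hsotg *hsotg = dev_get_drvdata(dev);

	/* Quiesce the gadget before the controller loses power/clocks. */
	return dwc2_hsotg_suspend(hsotg);
}

static int __maybe_unused dwc2_example_pm_resume(struct device *dev)
{
	struct dwc2_hsotg *hsotg = dev_get_drvdata(dev);

	/* Re-initialise the core and reconnect if the gadget was enabled. */
	return dwc2_hsotg_resume(hsotg);
}

static SIMPLE_DEV_PM_OPS(dwc2_example_pm_ops,
			 dwc2_example_pm_suspend, dwc2_example_pm_resume);
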
   5134/**
   5135 * dwc2_backup_device_registers() - Backup controller device registers.
    5136 * When suspending the USB bus, the device registers need to be backed
    5137 * up if controller power is disabled once suspended.
   5138 *
   5139 * @hsotg: Programming view of the DWC_otg controller
   5140 */
   5141int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
   5142{
   5143	struct dwc2_dregs_backup *dr;
   5144	int i;
   5145
   5146	dev_dbg(hsotg->dev, "%s\n", __func__);
   5147
   5148	/* Backup dev regs */
   5149	dr = &hsotg->dr_backup;
   5150
   5151	dr->dcfg = dwc2_readl(hsotg, DCFG);
   5152	dr->dctl = dwc2_readl(hsotg, DCTL);
   5153	dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
   5154	dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
   5155	dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
   5156
   5157	for (i = 0; i < hsotg->num_of_eps; i++) {
   5158		/* Backup IN EPs */
   5159		dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
   5160
   5161		/* Ensure DATA PID is correctly configured */
   5162		if (dr->diepctl[i] & DXEPCTL_DPID)
   5163			dr->diepctl[i] |= DXEPCTL_SETD1PID;
   5164		else
   5165			dr->diepctl[i] |= DXEPCTL_SETD0PID;
   5166
   5167		dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
   5168		dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
   5169
   5170		/* Backup OUT EPs */
   5171		dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
   5172
   5173		/* Ensure DATA PID is correctly configured */
   5174		if (dr->doepctl[i] & DXEPCTL_DPID)
   5175			dr->doepctl[i] |= DXEPCTL_SETD1PID;
   5176		else
   5177			dr->doepctl[i] |= DXEPCTL_SETD0PID;
   5178
   5179		dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
   5180		dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
   5181		dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
   5182	}
   5183	dr->valid = true;
   5184	return 0;
   5185}
   5186
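/*
 * The DATA PID handling above can be read as follows: DXEPCTL_DPID
 * reports the endpoint's current data toggle, but on restore the toggle
 * must be re-armed through the write-only SETD0PID/SETD1PID bits. A
 * hypothetical helper capturing just that step:
 */
static inline u32 __maybe_unused dwc2_example_fix_dpid(u32 epctl)
{
	/* Translate the read-only toggle state into the set-PID bits. */
	if (epctl & DXEPCTL_DPID)
		return epctl | DXEPCTL_SETD1PID;
	return epctl | DXEPCTL_SETD0PID;
}
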
   5187/**
   5188 * dwc2_restore_device_registers() - Restore controller device registers.
    5189 * When resuming the USB bus, the device registers need to be restored
    5190 * if controller power was disabled.
   5191 *
   5192 * @hsotg: Programming view of the DWC_otg controller
   5193 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
   5194 *
   5195 * Return: 0 if successful, negative error code otherwise
   5196 */
   5197int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
   5198{
   5199	struct dwc2_dregs_backup *dr;
   5200	int i;
   5201
   5202	dev_dbg(hsotg->dev, "%s\n", __func__);
   5203
   5204	/* Restore dev regs */
   5205	dr = &hsotg->dr_backup;
   5206	if (!dr->valid) {
   5207		dev_err(hsotg->dev, "%s: no device registers to restore\n",
   5208			__func__);
   5209		return -EINVAL;
   5210	}
   5211	dr->valid = false;
   5212
   5213	if (!remote_wakeup)
   5214		dwc2_writel(hsotg, dr->dctl, DCTL);
   5215
   5216	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
   5217	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
   5218	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
   5219
   5220	for (i = 0; i < hsotg->num_of_eps; i++) {
    5221		/* Restore IN EPs */
    5222		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
    5223		/* WA for enabled IN EPs in DDMA mode: on entering hibernation
    5224		 * a stale value is read and saved from DIEPDMAx; restoring it
    5225		 * on hibernation exit would assert a BNA interrupt, so
    5226		 * overwrite the saved value with the current descriptor list
    5227		 * address before restoring the register.
    5228		 */
    5229		if (using_desc_dma(hsotg) &&
    5230		    (dr->diepctl[i] & DXEPCTL_EPENA))
    5231			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
    5232		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
    5233		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
    5234		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
   5235		/* Restore OUT EPs */
   5236		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
    5237		/* WA for enabled OUT EPs in DDMA mode: on entering hibernation
    5238		 * a stale value is read and saved from DOEPDMAx; restoring it
    5239		 * on hibernation exit would assert a BNA interrupt, so the
    5240		 * saved value is overwritten with the descriptor list address.
    5241		 */
   5242		if (using_desc_dma(hsotg) &&
   5243		    (dr->doepctl[i] & DXEPCTL_EPENA))
   5244			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
   5245		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
   5246		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
   5247	}
   5248
   5249	return 0;
   5250}
   5251
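/*
 * Sketch of how the backup/restore pair above is intended to bracket a
 * controller power-down (assumed flow; the real sequencing lives in the
 * hibernation and partial-power-down entry/exit paths below, and the
 * helper name is hypothetical):
 */
static int __maybe_unused dwc2_example_power_cycle(struct dwc2_hsotg *hsotg,
						   int remote_wakeup)
{
	int ret;

	ret = dwc2_backup_device_registers(hsotg);
	if (ret)
		return ret;

	/* ... controller power is removed and later re-applied here ... */

	return dwc2_restore_device_registers(hsotg, remote_wakeup);
}
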
   5252/**
   5253 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
   5254 *
   5255 * @hsotg: Programming view of DWC_otg controller
   5256 *
   5257 */
   5258void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
   5259{
   5260	u32 val;
   5261
   5262	if (!hsotg->params.lpm)
   5263		return;
   5264
   5265	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
   5266	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
   5267	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
   5268	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
   5269	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
   5270	val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
   5271	val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
   5272	dwc2_writel(hsotg, val, GLPMCFG);
   5273	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
   5274
   5275	/* Unmask WKUP_ALERT Interrupt */
   5276	if (hsotg->params.service_interval)
   5277		dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
   5278}
   5279
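/*
 * Worked example (illustrative parameter values, not driver defaults):
 * with hird_threshold_en = true, hird_threshold = 4, lpm_clock_gating =
 * true and besl = true, the value programmed above is the OR of
 * GLPMCFG_LPMCAP | GLPMCFG_APPL1RES | GLPMCFG_HIRD_THRES_EN |
 * GLPMCFG_ENBLSLPM | (4 << GLPMCFG_HIRD_THRES_SHIFT) | GLPMCFG_ENBESL |
 * GLPMCFG_LPM_REJECT_CTRL_CONTROL | GLPMCFG_LPM_ACCEPT_CTRL_ISOC.
 */
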
   5280/**
   5281 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
   5282 *
   5283 * @hsotg: Programming view of DWC_otg controller
   5284 *
   5285 */
   5286void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
   5287{
   5288	u32 val = 0;
   5289
   5290	val |= GREFCLK_REF_CLK_MODE;
   5291	val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
   5292	val |= hsotg->params.sof_cnt_wkup_alert <<
   5293	       GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
   5294
   5295	dwc2_writel(hsotg, val, GREFCLK);
   5296	dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
   5297}
   5298
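/*
 * Hedged example: ref_clk_per is the reference clock period expressed in
 * picoseconds (the driver default of 33333 corresponds to a 30 MHz
 * clock). A hypothetical helper deriving it from a clock rate in Hz:
 */
static inline u32 __maybe_unused dwc2_example_ref_clk_per_ps(unsigned long rate_hz)
{
	/* 1 s = 10^12 ps; round to the nearest whole picosecond. */
	return (u32)DIV_ROUND_CLOSEST_ULL(1000000000000ULL, rate_hz);
}
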
   5299/**
   5300 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
   5301 *
   5302 * @hsotg: Programming view of the DWC_otg controller
   5303 *
    5304 * Return non-zero if failed to enter hibernation.
   5305 */
   5306int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
   5307{
   5308	u32 gpwrdn;
   5309	int ret = 0;
   5310
    5311	/* Change to L2 (suspend) state */
   5312	hsotg->lx_state = DWC2_L2;
   5313	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
   5314	ret = dwc2_backup_global_registers(hsotg);
   5315	if (ret) {
   5316		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
   5317			__func__);
   5318		return ret;
   5319	}
   5320	ret = dwc2_backup_device_registers(hsotg);
   5321	if (ret) {
   5322		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
   5323			__func__);
   5324		return ret;
   5325	}
   5326
   5327	gpwrdn = GPWRDN_PWRDNRSTN;
   5328	gpwrdn |= GPWRDN_PMUACTV;
   5329	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5330	udelay(10);
   5331
   5332	/* Set flag to indicate that we are in hibernation */
   5333	hsotg->hibernated = 1;
   5334
   5335	/* Enable interrupts from wake up logic */
   5336	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5337	gpwrdn |= GPWRDN_PMUINTSEL;
   5338	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5339	udelay(10);
   5340
   5341	/* Unmask device mode interrupts in GPWRDN */
   5342	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5343	gpwrdn |= GPWRDN_RST_DET_MSK;
   5344	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
   5345	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
   5346	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5347	udelay(10);
   5348
   5349	/* Enable Power Down Clamp */
   5350	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5351	gpwrdn |= GPWRDN_PWRDNCLMP;
   5352	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5353	udelay(10);
   5354
   5355	/* Switch off VDD */
   5356	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5357	gpwrdn |= GPWRDN_PWRDNSWTCH;
   5358	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5359	udelay(10);
   5360
    5361	/* Save gpwrdn register for later use in case of a stschng interrupt */
   5362	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5363	dev_dbg(hsotg->dev, "Hibernation completed\n");
   5364
   5365	return ret;
   5366}
   5367
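/*
 * The hibernation entry sequence above repeats one idiom: read GPWRDN,
 * OR in one or more bits, write it back, then give the wakeup logic
 * 10 us to settle. A hypothetical helper expressing that idiom:
 */
static inline void __maybe_unused dwc2_example_gpwrdn_set(struct dwc2_hsotg *hsotg,
							  u32 bits)
{
	u32 gpwrdn = dwc2_readl(hsotg, GPWRDN);

	gpwrdn |= bits;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);
}
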
   5368/**
    5369 * dwc2_gadget_exit_hibernation() - Exit from device mode hibernation.
    5370 * Exit is triggered by host-initiated resume/reset or by device-initiated
    5371 * remote wakeup.
   5372 *
   5373 * @hsotg: Programming view of the DWC_otg controller
   5374 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
   5375 * @reset: indicates whether resume is initiated by Reset.
   5376 *
   5377 * Return non-zero if failed to exit from hibernation.
   5378 */
   5379int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
   5380				 int rem_wakeup, int reset)
   5381{
   5382	u32 pcgcctl;
   5383	u32 gpwrdn;
   5384	u32 dctl;
   5385	int ret = 0;
   5386	struct dwc2_gregs_backup *gr;
   5387	struct dwc2_dregs_backup *dr;
   5388
   5389	gr = &hsotg->gr_backup;
   5390	dr = &hsotg->dr_backup;
   5391
   5392	if (!hsotg->hibernated) {
   5393		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
   5394		return 1;
   5395	}
   5396	dev_dbg(hsotg->dev,
   5397		"%s: called with rem_wakeup = %d reset = %d\n",
   5398		__func__, rem_wakeup, reset);
   5399
   5400	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
   5401
   5402	if (!reset) {
    5403		/* Clear all pending interrupts */
   5404		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
   5405	}
   5406
   5407	/* De-assert Restore */
   5408	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5409	gpwrdn &= ~GPWRDN_RESTORE;
   5410	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5411	udelay(10);
   5412
   5413	if (!rem_wakeup) {
   5414		pcgcctl = dwc2_readl(hsotg, PCGCTL);
   5415		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
   5416		dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5417	}
   5418
   5419	/* Restore GUSBCFG, DCFG and DCTL */
   5420	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
   5421	dwc2_writel(hsotg, dr->dcfg, DCFG);
   5422	dwc2_writel(hsotg, dr->dctl, DCTL);
   5423
   5424	/* On USB Reset, reset device address to zero */
   5425	if (reset)
   5426		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
   5427
   5428	/* De-assert Wakeup Logic */
   5429	gpwrdn = dwc2_readl(hsotg, GPWRDN);
   5430	gpwrdn &= ~GPWRDN_PMUACTV;
   5431	dwc2_writel(hsotg, gpwrdn, GPWRDN);
   5432
   5433	if (rem_wakeup) {
   5434		udelay(10);
   5435		/* Start Remote Wakeup Signaling */
   5436		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
   5437	} else {
   5438		udelay(50);
   5439		/* Set Device programming done bit */
   5440		dctl = dwc2_readl(hsotg, DCTL);
   5441		dctl |= DCTL_PWRONPRGDONE;
   5442		dwc2_writel(hsotg, dctl, DCTL);
   5443	}
   5444	/* Wait for interrupts which must be cleared */
   5445	mdelay(2);
    5446	/* Clear all pending interrupts */
   5447	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
   5448
   5449	/* Restore global registers */
   5450	ret = dwc2_restore_global_registers(hsotg);
   5451	if (ret) {
   5452		dev_err(hsotg->dev, "%s: failed to restore registers\n",
   5453			__func__);
   5454		return ret;
   5455	}
   5456
   5457	/* Restore device registers */
   5458	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
   5459	if (ret) {
   5460		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
   5461			__func__);
   5462		return ret;
   5463	}
   5464
   5465	if (rem_wakeup) {
   5466		mdelay(10);
   5467		dctl = dwc2_readl(hsotg, DCTL);
   5468		dctl &= ~DCTL_RMTWKUPSIG;
   5469		dwc2_writel(hsotg, dctl, DCTL);
   5470	}
   5471
   5472	hsotg->hibernated = 0;
   5473	hsotg->lx_state = DWC2_L0;
   5474	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
   5475
   5476	return ret;
   5477}
   5478
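/*
 * Usage sketch (assumed caller, e.g. a wakeup-interrupt path; the helper
 * name is hypothetical): the rem_wakeup/reset flags tell
 * dwc2_gadget_exit_hibernation() whether the device or the host woke the
 * link, and whether the wakeup event was a USB reset.
 */
static int __maybe_unused dwc2_example_handle_wakeup(struct dwc2_hsotg *hsotg,
						     bool host_reset)
{
	/* Host-initiated resume/reset: no remote-wakeup signalling. */
	return dwc2_gadget_exit_hibernation(hsotg, 0, host_reset ? 1 : 0);
}
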
   5479/**
   5480 * dwc2_gadget_enter_partial_power_down() - Put controller in partial
   5481 * power down.
   5482 *
   5483 * @hsotg: Programming view of the DWC_otg controller
   5484 *
   5485 * Return: non-zero if failed to enter device partial power down.
   5486 *
   5487 * This function is for entering device mode partial power down.
   5488 */
   5489int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
   5490{
   5491	u32 pcgcctl;
   5492	int ret = 0;
   5493
   5494	dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
   5495
   5496	/* Backup all registers */
   5497	ret = dwc2_backup_global_registers(hsotg);
   5498	if (ret) {
   5499		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
   5500			__func__);
   5501		return ret;
   5502	}
   5503
   5504	ret = dwc2_backup_device_registers(hsotg);
   5505	if (ret) {
   5506		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
   5507			__func__);
   5508		return ret;
   5509	}
   5510
   5511	/*
   5512	 * Clear any pending interrupts since dwc2 will not be able to
   5513	 * clear them after entering partial_power_down.
   5514	 */
   5515	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
   5516
   5517	/* Put the controller in low power state */
   5518	pcgcctl = dwc2_readl(hsotg, PCGCTL);
   5519
   5520	pcgcctl |= PCGCTL_PWRCLMP;
   5521	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5522	udelay(5);
   5523
   5524	pcgcctl |= PCGCTL_RSTPDWNMODULE;
   5525	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5526	udelay(5);
   5527
   5528	pcgcctl |= PCGCTL_STOPPCLK;
   5529	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5530
    5531	/* Set the in_ppd flag to 1 as the core enters suspend here. */
   5532	hsotg->in_ppd = 1;
   5533	hsotg->lx_state = DWC2_L2;
   5534
   5535	dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
   5536
   5537	return ret;
   5538}
   5539
    5540/**
    5541 * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
    5542 * power down.
    5543 *
    5544 * @hsotg: Programming view of the DWC_otg controller
    5545 * @restore: indicates whether the registers need to be restored.
   5546 *
   5547 * Return: non-zero if failed to exit device partial power down.
   5548 *
   5549 * This function is for exiting from device mode partial power down.
   5550 */
   5551int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
   5552					bool restore)
   5553{
   5554	u32 pcgcctl;
   5555	u32 dctl;
   5556	struct dwc2_dregs_backup *dr;
   5557	int ret = 0;
   5558
   5559	dr = &hsotg->dr_backup;
   5560
   5561	dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
   5562
   5563	pcgcctl = dwc2_readl(hsotg, PCGCTL);
   5564	pcgcctl &= ~PCGCTL_STOPPCLK;
   5565	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5566
   5567	pcgcctl = dwc2_readl(hsotg, PCGCTL);
   5568	pcgcctl &= ~PCGCTL_PWRCLMP;
   5569	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5570
   5571	pcgcctl = dwc2_readl(hsotg, PCGCTL);
   5572	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
   5573	dwc2_writel(hsotg, pcgcctl, PCGCTL);
   5574
   5575	udelay(100);
   5576	if (restore) {
   5577		ret = dwc2_restore_global_registers(hsotg);
   5578		if (ret) {
   5579			dev_err(hsotg->dev, "%s: failed to restore registers\n",
   5580				__func__);
   5581			return ret;
   5582		}
   5583		/* Restore DCFG */
   5584		dwc2_writel(hsotg, dr->dcfg, DCFG);
   5585
   5586		ret = dwc2_restore_device_registers(hsotg, 0);
   5587		if (ret) {
   5588			dev_err(hsotg->dev, "%s: failed to restore device registers\n",
   5589				__func__);
   5590			return ret;
   5591		}
   5592	}
   5593
   5594	/* Set the Power-On Programming done bit */
   5595	dctl = dwc2_readl(hsotg, DCTL);
   5596	dctl |= DCTL_PWRONPRGDONE;
   5597	dwc2_writel(hsotg, dctl, DCTL);
   5598
    5599	/* Set the in_ppd flag to 0 as the core exits suspend here. */
   5600	hsotg->in_ppd = 0;
   5601	hsotg->lx_state = DWC2_L0;
   5602
   5603	dev_dbg(hsotg->dev, "Exiting device partial Power Down completed.\n");
   5604	return ret;
   5605}
   5606
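/*
 * Sketch of the intended pairing (assumed flow, hypothetical helper
 * name): entry backs the registers up, exit with restore == true writes
 * them back after power returns.
 */
static int __maybe_unused dwc2_example_ppd_cycle(struct dwc2_hsotg *hsotg)
{
	int ret;

	ret = dwc2_gadget_enter_partial_power_down(hsotg);
	if (ret)
		return ret;

	/* ... bus stays suspended; controller power may be reduced here ... */

	/* Restore everything that was backed up on entry. */
	return dwc2_gadget_exit_partial_power_down(hsotg, true);
}
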
   5607/**
   5608 * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
   5609 *
   5610 * @hsotg: Programming view of the DWC_otg controller
    5611 *
   5614 * This function is for entering device mode clock gating.
   5615 */
   5616void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
   5617{
   5618	u32 pcgctl;
   5619
   5620	dev_dbg(hsotg->dev, "Entering device clock gating.\n");
   5621
    5622	/* Set the Stop PHY clock bit as suspend is received. */
   5623	pcgctl = dwc2_readl(hsotg, PCGCTL);
   5624	pcgctl |= PCGCTL_STOPPCLK;
   5625	dwc2_writel(hsotg, pcgctl, PCGCTL);
   5626	udelay(5);
   5627
    5628	/* Set the Gate HCLK bit as suspend is received. */
   5629	pcgctl = dwc2_readl(hsotg, PCGCTL);
   5630	pcgctl |= PCGCTL_GATEHCLK;
   5631	dwc2_writel(hsotg, pcgctl, PCGCTL);
   5632	udelay(5);
   5633
   5634	hsotg->lx_state = DWC2_L2;
   5635	hsotg->bus_suspended = true;
   5636}
   5637
    5638/**
   5639 * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
   5640 *
   5641 * @hsotg: Programming view of the DWC_otg controller
   5642 * @rem_wakeup: indicates whether remote wake up is enabled.
   5643 *
   5644 * This function is for exiting from device mode clock gating.
   5645 */
   5646void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
   5647{
   5648	u32 pcgctl;
   5649	u32 dctl;
   5650
   5651	dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
   5652
    5653	/* Clear the Gate HCLK bit. */
   5654	pcgctl = dwc2_readl(hsotg, PCGCTL);
   5655	pcgctl &= ~PCGCTL_GATEHCLK;
   5656	dwc2_writel(hsotg, pcgctl, PCGCTL);
   5657	udelay(5);
   5658
    5659	/* Clear the Stop PHY clock bit. */
   5660	pcgctl = dwc2_readl(hsotg, PCGCTL);
   5661	pcgctl &= ~PCGCTL_STOPPCLK;
   5662	dwc2_writel(hsotg, pcgctl, PCGCTL);
   5663	udelay(5);
   5664
   5665	if (rem_wakeup) {
   5666		/* Set Remote Wakeup Signaling */
   5667		dctl = dwc2_readl(hsotg, DCTL);
   5668		dctl |= DCTL_RMTWKUPSIG;
   5669		dwc2_writel(hsotg, dctl, DCTL);
   5670	}
   5671
   5672	/* Change to L0 state */
   5673	call_gadget(hsotg, resume);
   5674	hsotg->lx_state = DWC2_L0;
   5675	hsotg->bus_suspended = false;
   5676}
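
/*
 * Hedged overview sketch (hypothetical helper, constants from core.h):
 * which of the three low-power paths above is taken on bus suspend
 * depends roughly on the power_down parameter.
 */
static void __maybe_unused dwc2_example_pick_low_power(struct dwc2_hsotg *hsotg)
{
	switch (hsotg->params.power_down) {
	case DWC2_POWER_DOWN_PARAM_PARTIAL:
		dwc2_gadget_enter_partial_power_down(hsotg);
		break;
	case DWC2_POWER_DOWN_PARAM_HIBERNATION:
		dwc2_gadget_enter_hibernation(hsotg);
		break;
	case DWC2_POWER_DOWN_PARAM_NONE:
	default:
		/* No power down; just gate the clocks. */
		dwc2_gadget_enter_clock_gating(hsotg);
		break;
	}
}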