cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

snps_udc_core.c (81731B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (https://www.amd.com)
 * Author: Thomas Dahlmann
 */

/*
 * This file contains the core driver implementation for the UDC that is based
 * on the Synopsys device controller IP (distinct from the HS OTG IP) and is
 * either connected through the PCI bus or integrated into SoC platforms.
 */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "amd5536udc.h"

static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
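
/*
 * Example walk-through (editor's sketch, derived from
 * udc_timer_function() and udc_queue() below): with set_rde == 1 the
 * timer polls the RX FIFO; if the FIFO is empty it just re-arms, if
 * the FIFO holds data it bumps set_rde to 2 and re-arms, and on the
 * following expiry it sets the RDE bit so DMA can drain the FIFO and
 * resets set_rde to -1. udc_queue() short-circuits this by setting
 * RDE directly (set_rde == 0 keeps the timer idle).
 */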

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(dev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					  / UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* frees pci pool descriptors of a DMA chain */
static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	struct udc_data_dma *td = req->td_data;
	unsigned int i;

	dma_addr_t addr_next = 0x00;
	dma_addr_t addr = (dma_addr_t)td->next;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	for (i = 1; i < req->chain_len; i++) {
		td = phys_to_virt(addr);
		addr_next = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr_next;
	}
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep	*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		dma_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

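/*
 * Editor's note on BNA (Buffer Not Available): if OUT DMA runs
 * without a usable descriptor, the controller flags BNA. The dummy
 * request built below is a self-referencing descriptor (next points
 * to itself, L bit set) whose buffer-state field keeps the engine
 * parked, giving stray OUT DMA a harmless target until a real
 * descriptor is written (inferred from udc_init_bna_dummy() and its
 * callers in this file).
 */
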
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

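/*
 * Editor's example for the FIFO access pattern below: the TX FIFO is
 * written in 32-bit words, so a 7-byte payload becomes one writel()
 * of bytes 0-3 followed by three writeb() calls for bytes 4-6, each
 * shifted out of the same source dword.
 */
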
/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

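/*
 * Editor's note on the chain layout built below: in packet-per-buffer
 * (PPB) mode each descriptor carries at most ep->ep.maxpacket bytes,
 * so a request of length N needs ceil(N / maxpacket) descriptors. The
 * first one is req->td_data (allocated with the request); only the
 * remaining ones are allocated here, and an already-allocated chain
 * of sufficient length is reused rather than rebuilt.
 */
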
/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
	     bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					    gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						    ep->ep.maxpacket,
						    UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}

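/*
 * Editor's note on the buffer-state (BS) field used below: HOST_READY
 * hands a descriptor to the controller, HOST_BUSY keeps it owned by
 * the driver, and the controller reports DMA_DONE when it has
 * finished with it. IN descriptors are prepared HOST_BUSY and flipped
 * to HOST_READY in udc_queue(); OUT descriptors are handed over as
 * HOST_READY right away (summary of how the flags are used in this
 * file).
 */
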
/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

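/*
 * Editor's summary of the queueing paths below: a zero-length request
 * on an empty queue completes immediately (IN ZLPs are generated by
 * hardware, and pending set_config/set_intf handshakes are acked
 * here); otherwise a DMA descriptor (or chain) is prepared and, for
 * OUT endpoints, RDE is cleared while the descriptor pointer is
 * written and re-enabled once the request is on the queue.
 */
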
/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA during descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
EXPORT_SYMBOL_GPL(empty_req_queue);

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* Inits UDC context */
void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
EXPORT_SYMBOL_GPL(udc_basic_init);

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	/* Return if already connected */
	if (dev->connected)
		return;

	dev_info(dev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls the gadget with a disconnect event, resets the UDC and
 * performs the initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	u32 tmp;

	/* Return if already disconnected */
	if (!dev->connected)
		return;

	dev_info(dev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possibly pending interrupts, because interrupt
	 * status is lost after soft reset:
	 * ep interrupt status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device interrupt status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	/* Don't do this for Broadcom UDC since this is a reserved
	 * bit.
	 */
	if (dev->chiprev != UDC_BCM_REV) {
		spin_lock_irqsave(&udc_irq_spinlock, flags);
		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
		readl(&dev->regs->cfg);
		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
	}
}
   1703
   1704/* RDE timer callback to set RDE bit */
   1705static void udc_timer_function(struct timer_list *unused)
   1706{
   1707	u32 tmp;
   1708
   1709	spin_lock_irq(&udc_irq_spinlock);
   1710
   1711	if (set_rde > 0) {
   1712		/*
   1713		 * open the fifo if fifo was filled on last timer call
   1714		 * conditionally
   1715		 */
   1716		if (set_rde > 1) {
   1717			/* set RDE to receive setup data */
   1718			tmp = readl(&udc->regs->ctl);
   1719			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
   1720			writel(tmp, &udc->regs->ctl);
   1721			set_rde = -1;
   1722		} else if (readl(&udc->regs->sts)
   1723				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
   1724			/*
   1725			 * if fifo empty setup polling, do not just
   1726			 * open the fifo
   1727			 */
   1728			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
   1729			if (!stop_timer)
   1730				add_timer(&udc_timer);
   1731		} else {
			/*
			 * fifo contains data now: set up the timer so the
			 * fifo is opened on expiry and setup packets can
			 * still be received; when data packets get queued
			 * by the gadget layer, the timer is forced to
			 * expire with set_rde=0 (RDE is set in udc_queue())
			 */
   1739			set_rde++;
   1741			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
   1742			if (!stop_timer)
   1743				add_timer(&udc_timer);
   1744		}
   1745
   1746	} else
   1747		set_rde = -1; /* RDE was set by udc_queue() */
   1748	spin_unlock_irq(&udc_irq_spinlock);
   1749	if (stop_timer)
   1750		complete(&on_exit);
   1751
   1752}
   1753
   1754/* Handle halt state, used in stall poll timer */
   1755static void udc_handle_halt_state(struct udc_ep *ep)
   1756{
   1757	u32 tmp;
	/* while halted, check whether the STALL bit is still set */
   1759	if (ep->halted == 1) {
   1760		tmp = readl(&ep->regs->ctl);
   1761		/* STALL cleared ? */
   1762		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
   1763			/*
   1764			 * FIXME: MSC spec requires that stall remains
			 * even on receiving CLEAR_FEATURE HALT. So
   1766			 * we would set STALL again here to be compliant.
   1767			 * But with current mass storage drivers this does
   1768			 * not work (would produce endless host retries).
   1769			 * So we clear halt on CLEAR_FEATURE.
   1770			 *
   1771			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
   1772			tmp |= AMD_BIT(UDC_EPCTL_S);
   1773			writel(tmp, &ep->regs->ctl);*/
   1774
   1775			/* clear NAK by writing CNAK */
   1776			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
   1777			writel(tmp, &ep->regs->ctl);
   1778			ep->halted = 0;
   1779			UDC_QUEUE_CNAK(ep, ep->num);
   1780		}
   1781	}
   1782}
   1783
/* Stall timer callback to poll the S bit of halted endpoints */
   1785static void udc_pollstall_timer_function(struct timer_list *unused)
   1786{
   1787	struct udc_ep *ep;
   1788	int halted = 0;
   1789
   1790	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and one OUT endpoint are handled;
	 * IN poll stall first
	 */
   1795	ep = &udc->ep[UDC_EPIN_IX];
   1796	udc_handle_halt_state(ep);
   1797	if (ep->halted)
   1798		halted = 1;
   1799	/* OUT poll stall */
   1800	ep = &udc->ep[UDC_EPOUT_IX];
   1801	udc_handle_halt_state(ep);
   1802	if (ep->halted)
   1803		halted = 1;
   1804
	/* re-arm the timer while still halted (usecs converted to jiffies) */
   1806	if (!stop_pollstall_timer && halted) {
   1807		udc_pollstall_timer.expires = jiffies +
   1808					HZ * UDC_POLLSTALL_TIMER_USECONDS
   1809					/ (1000 * 1000);
   1810		add_timer(&udc_pollstall_timer);
   1811	}
   1812	spin_unlock_irq(&udc_stall_spinlock);
   1813
   1814	if (stop_pollstall_timer)
   1815		complete(&on_pollstall_exit);
   1816}
   1817
   1818/* Inits endpoint 0 so that SETUP packets are processed */
   1819static void activate_control_endpoints(struct udc *dev)
   1820{
   1821	u32 tmp;
   1822
   1823	DBG(dev, "activate_control_endpoints\n");
   1824
   1825	/* flush fifo */
   1826	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
   1827	tmp |= AMD_BIT(UDC_EPCTL_F);
   1828	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
   1829
   1830	/* set ep0 directions */
   1831	dev->ep[UDC_EP0IN_IX].in = 1;
   1832	dev->ep[UDC_EP0OUT_IX].in = 0;
   1833
   1834	/* set buffer size (tx fifo entries) of EP0_IN */
   1835	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
   1836	if (dev->gadget.speed == USB_SPEED_FULL)
   1837		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
   1838					UDC_EPIN_BUFF_SIZE);
   1839	else if (dev->gadget.speed == USB_SPEED_HIGH)
   1840		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
   1841					UDC_EPIN_BUFF_SIZE);
   1842	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
   1843
   1844	/* set max packet size of EP0_IN */
   1845	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
   1846	if (dev->gadget.speed == USB_SPEED_FULL)
   1847		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
   1848					UDC_EP_MAX_PKT_SIZE);
   1849	else if (dev->gadget.speed == USB_SPEED_HIGH)
   1850		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
   1851				UDC_EP_MAX_PKT_SIZE);
   1852	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
   1853
   1854	/* set max packet size of EP0_OUT */
   1855	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
   1856	if (dev->gadget.speed == USB_SPEED_FULL)
   1857		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
   1858					UDC_EP_MAX_PKT_SIZE);
   1859	else if (dev->gadget.speed == USB_SPEED_HIGH)
   1860		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
   1861					UDC_EP_MAX_PKT_SIZE);
   1862	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
   1863
   1864	/* set max packet size of EP0 in UDC CSR */
   1865	tmp = readl(&dev->csr->ne[0]);
   1866	if (dev->gadget.speed == USB_SPEED_FULL)
   1867		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
   1868					UDC_CSR_NE_MAX_PKT);
   1869	else if (dev->gadget.speed == USB_SPEED_HIGH)
   1870		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
   1871					UDC_CSR_NE_MAX_PKT);
   1872	writel(tmp, &dev->csr->ne[0]);
   1873
   1874	if (use_dma) {
   1875		dev->ep[UDC_EP0OUT_IX].td->status |=
   1876			AMD_BIT(UDC_DMA_OUT_STS_L);
   1877		/* write dma desc address */
   1878		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
   1879			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
   1880		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
   1881			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer: an expiry in the past fires at once */
   1883		if (timer_pending(&udc_timer)) {
   1884			set_rde = 0;
   1885			mod_timer(&udc_timer, jiffies - 1);
   1886		}
   1887		/* stop pollstall timer */
   1888		if (timer_pending(&udc_pollstall_timer))
   1889			mod_timer(&udc_pollstall_timer, jiffies - 1);
   1890		/* enable DMA */
   1891		tmp = readl(&dev->regs->ctl);
   1892		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
   1893				| AMD_BIT(UDC_DEVCTL_RDE)
   1894				| AMD_BIT(UDC_DEVCTL_TDE);
   1895		if (use_dma_bufferfill_mode)
   1896			tmp |= AMD_BIT(UDC_DEVCTL_BF);
   1897		else if (use_dma_ppb_du)
   1898			tmp |= AMD_BIT(UDC_DEVCTL_DU);
   1899		writel(tmp, &dev->regs->ctl);
   1900	}
   1901
   1902	/* clear NAK by writing CNAK for EP0IN */
   1903	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
   1904	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
   1905	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
   1906	dev->ep[UDC_EP0IN_IX].naking = 0;
   1907	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
   1908
   1909	/* clear NAK by writing CNAK for EP0OUT */
   1910	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
   1911	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
   1912	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
   1913	dev->ep[UDC_EP0OUT_IX].naking = 0;
   1914	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
   1915}
   1916
   1917/* Make endpoint 0 ready for control traffic */
   1918static int setup_ep0(struct udc *dev)
   1919{
   1920	activate_control_endpoints(dev);
   1921	/* enable ep0 interrupts */
   1922	udc_enable_ep0_interrupts(dev);
   1923	/* enable device setup interrupts */
   1924	udc_enable_dev_setup_interrupts(dev);
   1925
   1926	return 0;
   1927}
   1928
   1929/* Called by gadget driver to register itself */
   1930static int amd5536_udc_start(struct usb_gadget *g,
   1931		struct usb_gadget_driver *driver)
   1932{
   1933	struct udc *dev = to_amd5536_udc(g);
   1934	u32 tmp;
   1935
   1936	driver->driver.bus = NULL;
   1937	dev->driver = driver;
   1938
   1939	/* Some gadget drivers use both ep0 directions.
   1940	 * NOTE: to gadget driver, ep0 is just one endpoint...
   1941	 */
   1942	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
   1943		dev->ep[UDC_EP0IN_IX].ep.driver_data;
   1944
   1945	/* get ready for ep0 traffic */
   1946	setup_ep0(dev);
   1947
   1948	/* clear SD */
   1949	tmp = readl(&dev->regs->ctl);
   1950	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
   1951	writel(tmp, &dev->regs->ctl);
   1952
   1953	usb_connect(dev);
   1954
   1955	return 0;
   1956}
   1957
   1958/* shutdown requests and disconnect from gadget */
   1959static void
   1960shutdown(struct udc *dev, struct usb_gadget_driver *driver)
   1961__releases(dev->lock)
   1962__acquires(dev->lock)
   1963{
   1964	int tmp;
   1965
   1966	/* empty queues and init hardware */
   1967	udc_basic_init(dev);
   1968
   1969	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
   1970		empty_req_queue(&dev->ep[tmp]);
   1971
   1972	udc_setup_endpoints(dev);
   1973}
   1974
   1975/* Called by gadget driver to unregister itself */
   1976static int amd5536_udc_stop(struct usb_gadget *g)
   1977{
   1978	struct udc *dev = to_amd5536_udc(g);
   1979	unsigned long flags;
   1980	u32 tmp;
   1981
   1982	spin_lock_irqsave(&dev->lock, flags);
   1983	udc_mask_unused_interrupts(dev);
   1984	shutdown(dev, NULL);
   1985	spin_unlock_irqrestore(&dev->lock, flags);
   1986
   1987	dev->driver = NULL;
   1988
   1989	/* set SD */
   1990	tmp = readl(&dev->regs->ctl);
   1991	tmp |= AMD_BIT(UDC_DEVCTL_SD);
   1992	writel(tmp, &dev->regs->ctl);
   1993
   1994	return 0;
   1995}
   1996
   1997/* Clear pending NAK bits */
   1998static void udc_process_cnak_queue(struct udc *dev)
   1999{
   2000	u32 tmp;
   2001	u32 reg;
   2002
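	/*
	 * cnak_pending is a bitmask of endpoints that still need their
	 * NAK cleared; see the UDC_QUEUE_CNAK macro
	 */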
	/* check the IN endpoints */
   2004	DBG(dev, "CNAK pending queue processing\n");
   2005	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
   2006		if (cnak_pending & (1 << tmp)) {
   2007			DBG(dev, "CNAK pending for ep%d\n", tmp);
   2008			/* clear NAK by writing CNAK */
   2009			reg = readl(&dev->ep[tmp].regs->ctl);
   2010			reg |= AMD_BIT(UDC_EPCTL_CNAK);
   2011			writel(reg, &dev->ep[tmp].regs->ctl);
   2012			dev->ep[tmp].naking = 0;
   2013			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
   2014		}
   2015	}
	/* ... and ep0out */
   2017	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
   2018		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
   2019		/* clear NAK by writing CNAK */
   2020		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
   2021		reg |= AMD_BIT(UDC_EPCTL_CNAK);
   2022		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
   2023		dev->ep[UDC_EP0OUT_IX].naking = 0;
   2024		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
   2025				dev->ep[UDC_EP0OUT_IX].num);
   2026	}
   2027}
   2028
/* Enable RX DMA after a setup packet */
   2030static void udc_ep0_set_rde(struct udc *dev)
   2031{
   2032	if (use_dma) {
   2033		/*
   2034		 * only enable RXDMA when no data endpoint enabled
   2035		 * or data is queued
   2036		 */
   2037		if (!dev->data_ep_enabled || dev->data_ep_queued) {
   2038			udc_set_rde(dev);
   2039		} else {
			/*
			 * set up a timer for enabling RDE (so RXFIFO DMA
			 * is not enabled too early for data endpoints)
			 */
   2044			if (set_rde != 0 && !timer_pending(&udc_timer)) {
   2045				udc_timer.expires =
   2046					jiffies + HZ/UDC_RDE_TIMER_DIV;
   2047				set_rde = 1;
   2048				if (!stop_timer)
   2049					add_timer(&udc_timer);
   2050			}
   2051		}
   2052	}
   2053}
   2054
   2055
   2056/* Interrupt handler for data OUT traffic */
   2057static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
   2058{
   2059	irqreturn_t		ret_val = IRQ_NONE;
   2060	u32			tmp;
   2061	struct udc_ep		*ep;
   2062	struct udc_request	*req;
   2063	unsigned int		count;
   2064	struct udc_data_dma	*td = NULL;
   2065	unsigned		dma_done;
   2066
   2067	VDBG(dev, "ep%d irq\n", ep_ix);
   2068	ep = &dev->ep[ep_ix];
   2069
   2070	tmp = readl(&ep->regs->sts);
   2071	if (use_dma) {
   2072		/* BNA event ? */
   2073		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
   2074			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
   2075					ep->num, readl(&ep->regs->desptr));
   2076			/* clear BNA */
   2077			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
   2078			if (!ep->cancel_transfer)
   2079				ep->bna_occurred = 1;
   2080			else
   2081				ep->cancel_transfer = 0;
   2082			ret_val = IRQ_HANDLED;
   2083			goto finished;
   2084		}
   2085	}
   2086	/* HE event ? */
   2087	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
   2088		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
   2089
   2090		/* clear HE */
   2091		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
   2092		ret_val = IRQ_HANDLED;
   2093		goto finished;
   2094	}
   2095
   2096	if (!list_empty(&ep->queue)) {
   2097
   2098		/* next request */
   2099		req = list_entry(ep->queue.next,
   2100			struct udc_request, queue);
   2101	} else {
   2102		req = NULL;
   2103		udc_rxfifo_pending = 1;
   2104	}
   2105	VDBG(dev, "req = %p\n", req);
   2106	/* fifo mode */
   2107	if (!use_dma) {
   2108
   2109		/* read fifo */
   2110		if (req && udc_rxfifo_read(ep, req)) {
   2111			ret_val = IRQ_HANDLED;
   2112
   2113			/* finish */
   2114			complete_req(ep, req, 0);
   2115			/* next request */
   2116			if (!list_empty(&ep->queue) && !ep->halted) {
   2117				req = list_entry(ep->queue.next,
   2118					struct udc_request, queue);
   2119			} else
   2120				req = NULL;
   2121		}
   2122
   2123	/* DMA */
   2124	} else if (!ep->cancel_transfer && req) {
   2125		ret_val = IRQ_HANDLED;
   2126
   2127		/* check for DMA done */
   2128		if (!use_dma_ppb) {
   2129			dma_done = AMD_GETBITS(req->td_data->status,
   2130						UDC_DMA_OUT_STS_BS);
   2131		/* packet per buffer mode - rx bytes */
   2132		} else {
   2133			/*
   2134			 * if BNA occurred then recover desc. from
   2135			 * BNA dummy desc.
   2136			 */
   2137			if (ep->bna_occurred) {
   2138				VDBG(dev, "Recover desc. from BNA dummy\n");
   2139				memcpy(req->td_data, ep->bna_dummy_req->td_data,
   2140						sizeof(struct udc_data_dma));
   2141				ep->bna_occurred = 0;
   2142				udc_init_bna_dummy(ep->req);
   2143			}
   2144			td = udc_get_last_dma_desc(req);
   2145			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
   2146		}
   2147		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
   2148			/* buffer fill mode - rx bytes */
   2149			if (!use_dma_ppb) {
				/* number of bytes received */
   2151				count = AMD_GETBITS(req->td_data->status,
   2152						UDC_DMA_OUT_STS_RXBYTES);
   2153				VDBG(dev, "rx bytes=%u\n", count);
   2154			/* packet per buffer mode - rx bytes */
   2155			} else {
   2156				VDBG(dev, "req->td_data=%p\n", req->td_data);
   2157				VDBG(dev, "last desc = %p\n", td);
				/* number of bytes received */
   2159				if (use_dma_ppb_du) {
   2160					/* every desc. counts bytes */
   2161					count = udc_get_ppbdu_rxbytes(req);
   2162				} else {
   2163					/* last desc. counts bytes */
   2164					count = AMD_GETBITS(td->status,
   2165						UDC_DMA_OUT_STS_RXBYTES);
   2166					if (!count && req->req.length
   2167						== UDC_DMA_MAXPACKET) {
   2168						/*
   2169						 * on 64k packets the RXBYTES
   2170						 * field is zero
   2171						 */
   2172						count = UDC_DMA_MAXPACKET;
   2173					}
   2174				}
   2175				VDBG(dev, "last desc rx bytes=%u\n", count);
   2176			}
   2177
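			/*
			 * clamp count to the space left in the request;
			 * flag -EOVERFLOW when that space is not a
			 * multiple of maxpacket
			 */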
   2178			tmp = req->req.length - req->req.actual;
   2179			if (count > tmp) {
   2180				if ((tmp % ep->ep.maxpacket) != 0) {
   2181					DBG(dev, "%s: rx %db, space=%db\n",
   2182						ep->ep.name, count, tmp);
   2183					req->req.status = -EOVERFLOW;
   2184				}
   2185				count = tmp;
   2186			}
   2187			req->req.actual += count;
   2188			req->dma_going = 0;
   2189			/* complete request */
   2190			complete_req(ep, req, 0);
   2191
   2192			/* next request */
   2193			if (!list_empty(&ep->queue) && !ep->halted) {
   2194				req = list_entry(ep->queue.next,
   2195					struct udc_request,
   2196					queue);
				/*
				 * DMA may already have been started by
				 * udc_queue(), called from the gadget
				 * driver's completion routine. This happens
				 * when the queue holds only one request.
				 */
   2203				if (req->dma_going == 0) {
   2204					/* next dma */
   2205					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
   2206						goto finished;
   2207					/* write desc pointer */
   2208					writel(req->td_phys,
   2209						&ep->regs->desptr);
   2210					req->dma_going = 1;
   2211					/* enable DMA */
   2212					udc_set_rde(dev);
   2213				}
   2214			} else {
   2215				/*
   2216				 * implant BNA dummy descriptor to allow
   2217				 * RXFIFO opening by RDE
   2218				 */
   2219				if (ep->bna_dummy_req) {
   2220					/* write desc pointer */
   2221					writel(ep->bna_dummy_req->td_phys,
   2222						&ep->regs->desptr);
   2223					ep->bna_occurred = 0;
   2224				}
   2225
				/*
				 * schedule timer for setting RDE if the
				 * queue remains empty, to allow ep0 packets
				 * to pass through
				 */
   2231				if (set_rde != 0
   2232						&& !timer_pending(&udc_timer)) {
   2233					udc_timer.expires =
   2234						jiffies
   2235						+ HZ*UDC_RDE_TIMER_SECONDS;
   2236					set_rde = 1;
   2237					if (!stop_timer)
   2238						add_timer(&udc_timer);
   2239				}
   2240				if (ep->num != UDC_EP0OUT_IX)
   2241					dev->data_ep_queued = 0;
   2242			}
   2243
   2244		} else {
			/*
			 * RX DMA must be re-enabled for each desc in PPBDU
			 * mode and must be enabled in PPBNDU mode in case
			 * of BNA
			 */
   2249			udc_set_rde(dev);
   2250		}
   2251
   2252	} else if (ep->cancel_transfer) {
   2253		ret_val = IRQ_HANDLED;
   2254		ep->cancel_transfer = 0;
   2255	}
   2256
   2257	/* check pending CNAKS */
   2258	if (cnak_pending) {
		/* process CNAKs only when the rxfifo is empty */
   2260		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
   2261			udc_process_cnak_queue(dev);
   2262	}
   2263
   2264	/* clear OUT bits in ep status */
   2265	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
   2266finished:
   2267	return ret_val;
   2268}
   2269
   2270/* Interrupt handler for data IN traffic */
   2271static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
   2272{
   2273	irqreturn_t ret_val = IRQ_NONE;
   2274	u32 tmp;
   2275	u32 epsts;
   2276	struct udc_ep *ep;
   2277	struct udc_request *req;
   2278	struct udc_data_dma *td;
   2279	unsigned len;
   2280
   2281	ep = &dev->ep[ep_ix];
   2282
   2283	epsts = readl(&ep->regs->sts);
   2284	if (use_dma) {
   2285		/* BNA ? */
   2286		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
   2287			dev_err(dev->dev,
   2288				"BNA ep%din occurred - DESPTR = %08lx\n",
   2289				ep->num,
   2290				(unsigned long) readl(&ep->regs->desptr));
   2291
   2292			/* clear BNA */
   2293			writel(epsts, &ep->regs->sts);
   2294			ret_val = IRQ_HANDLED;
   2295			goto finished;
   2296		}
   2297	}
   2298	/* HE event ? */
   2299	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
   2300		dev_err(dev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
   2302			ep->num, (unsigned long) readl(&ep->regs->desptr));
   2303
   2304		/* clear HE */
   2305		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
   2306		ret_val = IRQ_HANDLED;
   2307		goto finished;
   2308	}
   2309
   2310	/* DMA completion */
   2311	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
   2312		VDBG(dev, "TDC set- completion\n");
   2313		ret_val = IRQ_HANDLED;
   2314		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
   2315			req = list_entry(ep->queue.next,
   2316					struct udc_request, queue);
			/*
			 * length bytes were transferred; in PPBDU mode
			 * check dma done of the last descriptor
			 */
   2321			if (use_dma_ppb_du) {
   2322				td = udc_get_last_dma_desc(req);
   2323				if (td)
   2324					req->req.actual = req->req.length;
   2325			} else {
   2326				/* assume all bytes transferred */
   2327				req->req.actual = req->req.length;
   2328			}
   2329
   2330			if (req->req.actual == req->req.length) {
   2331				/* complete req */
   2332				complete_req(ep, req, 0);
   2333				req->dma_going = 0;
   2334				/* further request available ? */
   2335				if (list_empty(&ep->queue)) {
   2336					/* disable interrupt */
   2337					tmp = readl(&dev->regs->ep_irqmsk);
   2338					tmp |= AMD_BIT(ep->num);
   2339					writel(tmp, &dev->regs->ep_irqmsk);
   2340				}
   2341			}
   2342		}
   2343		ep->cancel_transfer = 0;
   2344
   2345	}
	/*
	 * status reg has IN bit set and TDC not set: if TDC was handled,
	 * IN must not be handled as well (UDC defect?)
	 */
   2350	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
   2351			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
   2352		ret_val = IRQ_HANDLED;
   2353		if (!list_empty(&ep->queue)) {
   2354			/* next request */
   2355			req = list_entry(ep->queue.next,
   2356					struct udc_request, queue);
   2357			/* FIFO mode */
   2358			if (!use_dma) {
   2359				/* write fifo */
   2360				udc_txfifo_write(ep, &req->req);
   2361				len = req->req.length - req->req.actual;
   2362				if (len > ep->ep.maxpacket)
   2363					len = ep->ep.maxpacket;
   2364				req->req.actual += len;
   2365				if (req->req.actual == req->req.length
   2366					|| (len != ep->ep.maxpacket)) {
   2367					/* complete req */
   2368					complete_req(ep, req, 0);
   2369				}
   2370			/* DMA */
   2371			} else if (req && !req->dma_going) {
   2372				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
   2373					req, req->td_data);
   2374				if (req->td_data) {
   2375
   2376					req->dma_going = 1;
   2377
   2378					/*
   2379					 * unset L bit of first desc.
   2380					 * for chain
   2381					 */
   2382					if (use_dma_ppb && req->req.length >
   2383							ep->ep.maxpacket) {
   2384						req->td_data->status &=
   2385							AMD_CLEAR_BIT(
   2386							UDC_DMA_IN_STS_L);
   2387					}
   2388
   2389					/* write desc pointer */
   2390					writel(req->td_phys, &ep->regs->desptr);
   2391
   2392					/* set HOST READY */
   2393					req->td_data->status =
   2394						AMD_ADDBITS(
   2395						req->td_data->status,
   2396						UDC_DMA_IN_STS_BS_HOST_READY,
   2397						UDC_DMA_IN_STS_BS);
   2398
   2399					/* set poll demand bit */
   2400					tmp = readl(&ep->regs->ctl);
   2401					tmp |= AMD_BIT(UDC_EPCTL_P);
   2402					writel(tmp, &ep->regs->ctl);
   2403				}
   2404			}
   2405
   2406		} else if (!use_dma && ep->in) {
   2407			/* disable interrupt */
   2408			tmp = readl(
   2409				&dev->regs->ep_irqmsk);
   2410			tmp |= AMD_BIT(ep->num);
   2411			writel(tmp,
   2412				&dev->regs->ep_irqmsk);
   2413		}
   2414	}
   2415	/* clear status bits */
   2416	writel(epsts, &ep->regs->sts);
   2417
   2418finished:
   2419	return ret_val;
   2420
   2421}
   2422
   2423/* Interrupt handler for Control OUT traffic */
   2424static irqreturn_t udc_control_out_isr(struct udc *dev)
   2425__releases(dev->lock)
   2426__acquires(dev->lock)
   2427{
   2428	irqreturn_t ret_val = IRQ_NONE;
   2429	u32 tmp;
   2430	int setup_supported;
   2431	u32 count;
   2432	int set = 0;
   2433	struct udc_ep	*ep;
   2434	struct udc_ep	*ep_tmp;
   2435
   2436	ep = &dev->ep[UDC_EP0OUT_IX];
   2437
   2438	/* clear irq */
   2439	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
   2440
   2441	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
   2442	/* check BNA and clear if set */
   2443	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
   2444		VDBG(dev, "ep0: BNA set\n");
   2445		writel(AMD_BIT(UDC_EPSTS_BNA),
   2446			&dev->ep[UDC_EP0OUT_IX].regs->sts);
   2447		ep->bna_occurred = 1;
   2448		ret_val = IRQ_HANDLED;
   2449		goto finished;
   2450	}
   2451
   2452	/* type of data: SETUP or DATA 0 bytes */
   2453	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
   2454	VDBG(dev, "data_typ = %x\n", tmp);
   2455
   2456	/* setup data */
   2457	if (tmp == UDC_EPSTS_OUT_SETUP) {
   2458		ret_val = IRQ_HANDLED;
   2459
   2460		ep->dev->stall_ep0in = 0;
   2461		dev->waiting_zlp_ack_ep0in = 0;
   2462
   2463		/* set NAK for EP0_IN */
   2464		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
   2465		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
   2466		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
   2467		dev->ep[UDC_EP0IN_IX].naking = 1;
   2468		/* get setup data */
   2469		if (use_dma) {
   2470
   2471			/* clear OUT bits in ep status */
   2472			writel(UDC_EPSTS_OUT_CLEAR,
   2473				&dev->ep[UDC_EP0OUT_IX].regs->sts);
   2474
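			/*
			 * the 8-byte SETUP packet arrives as two 32-bit
			 * words of the setup descriptor
			 */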
   2475			setup_data.data[0] =
   2476				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
   2477			setup_data.data[1] =
   2478				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
   2479			/* set HOST READY */
   2480			dev->ep[UDC_EP0OUT_IX].td_stp->status =
   2481					UDC_DMA_STP_STS_BS_HOST_READY;
   2482		} else {
   2483			/* read fifo */
   2484			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
   2485		}
   2486
   2487		/* determine direction of control data */
   2488		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
   2489			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
   2490			/* enable RDE */
   2491			udc_ep0_set_rde(dev);
   2492			set = 0;
   2493		} else {
   2494			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
   2495			/*
   2496			 * implant BNA dummy descriptor to allow RXFIFO opening
   2497			 * by RDE
   2498			 */
   2499			if (ep->bna_dummy_req) {
   2500				/* write desc pointer */
   2501				writel(ep->bna_dummy_req->td_phys,
   2502					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
   2503				ep->bna_occurred = 0;
   2504			}
   2505
   2506			set = 1;
   2507			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * set up a timer for enabling RDE (so RXFIFO DMA
			 * is not enabled too early for data)
			 */
   2512			set_rde = 1;
   2513			if (!timer_pending(&udc_timer)) {
   2514				udc_timer.expires = jiffies +
   2515							HZ/UDC_RDE_TIMER_DIV;
   2516				if (!stop_timer)
   2517					add_timer(&udc_timer);
   2518			}
   2519		}
   2520
		/*
		 * a mass storage reset must be processed here because the
		 * next packet may be a CLEAR_FEATURE HALT, which would not
		 * clear the stall bit if no STALL handshake was received
		 * before (autostall can cause this)
		 */
   2527		if (setup_data.data[0] == UDC_MSCRES_DWORD0
   2528				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
   2529			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits;
			 * only one IN and one OUT endpoint are handled
			 */
   2534			ep_tmp = &udc->ep[UDC_EPIN_IX];
   2535			udc_set_halt(&ep_tmp->ep, 0);
   2536			ep_tmp = &udc->ep[UDC_EPOUT_IX];
   2537			udc_set_halt(&ep_tmp->ep, 0);
   2538		}
   2539
   2540		/* call gadget with setup data received */
   2541		spin_unlock(&dev->lock);
   2542		setup_supported = dev->driver->setup(&dev->gadget,
   2543						&setup_data.request);
   2544		spin_lock(&dev->lock);
   2545
   2546		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
   2547		/* ep0 in returns data (not zlp) on IN phase */
   2548		if (setup_supported >= 0 && setup_supported <
   2549				UDC_EP0IN_MAXPACKET) {
   2550			/* clear NAK by writing CNAK in EP0_IN */
   2551			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
   2552			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
   2553			dev->ep[UDC_EP0IN_IX].naking = 0;
   2554			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
   2555
   2556		/* if unsupported request then stall */
   2557		} else if (setup_supported < 0) {
   2558			tmp |= AMD_BIT(UDC_EPCTL_S);
   2559			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
   2560		} else
   2561			dev->waiting_zlp_ack_ep0in = 1;
   2562
   2563
   2564		/* clear NAK by writing CNAK in EP0_OUT */
   2565		if (!set) {
   2566			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
   2567			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
   2568			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
   2569			dev->ep[UDC_EP0OUT_IX].naking = 0;
   2570			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
   2571		}
   2572
   2573		if (!use_dma) {
   2574			/* clear OUT bits in ep status */
   2575			writel(UDC_EPSTS_OUT_CLEAR,
   2576				&dev->ep[UDC_EP0OUT_IX].regs->sts);
   2577		}
   2578
   2579	/* data packet 0 bytes */
   2580	} else if (tmp == UDC_EPSTS_OUT_DATA) {
   2581		/* clear OUT bits in ep status */
   2582		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
   2583
		/* get data: only a 0-byte packet expected */
   2585		if (use_dma) {
   2586			/* no req if 0 packet, just reactivate */
   2587			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
   2588				VDBG(dev, "ZLP\n");
   2589
   2590				/* set HOST READY */
   2591				dev->ep[UDC_EP0OUT_IX].td->status =
   2592					AMD_ADDBITS(
   2593					dev->ep[UDC_EP0OUT_IX].td->status,
   2594					UDC_DMA_OUT_STS_BS_HOST_READY,
   2595					UDC_DMA_OUT_STS_BS);
   2596				/* enable RDE */
   2597				udc_ep0_set_rde(dev);
   2598				ret_val = IRQ_HANDLED;
   2599
   2600			} else {
   2601				/* control write */
   2602				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
   2603				/* re-program desc. pointer for possible ZLPs */
   2604				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
   2605					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
   2606				/* enable RDE */
   2607				udc_ep0_set_rde(dev);
   2608			}
   2609		} else {
   2610
			/* number of bytes received */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* OUT data in fifo mode does not work, force 0 */
			count = 0;
   2616
   2617			/* 0 packet or real data ? */
   2618			if (count != 0) {
   2619				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
   2620			} else {
   2621				/* dummy read confirm */
   2622				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
   2623				ret_val = IRQ_HANDLED;
   2624			}
   2625		}
   2626	}
   2627
   2628	/* check pending CNAKS */
   2629	if (cnak_pending) {
		/* process CNAKs only when the rxfifo is empty */
   2631		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
   2632			udc_process_cnak_queue(dev);
   2633	}
   2634
   2635finished:
   2636	return ret_val;
   2637}
   2638
   2639/* Interrupt handler for Control IN traffic */
   2640static irqreturn_t udc_control_in_isr(struct udc *dev)
   2641{
   2642	irqreturn_t ret_val = IRQ_NONE;
   2643	u32 tmp;
   2644	struct udc_ep *ep;
   2645	struct udc_request *req;
   2646	unsigned len;
   2647
   2648	ep = &dev->ep[UDC_EP0IN_IX];
   2649
   2650	/* clear irq */
   2651	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
   2652
   2653	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
   2654	/* DMA completion */
   2655	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
   2656		VDBG(dev, "isr: TDC clear\n");
   2657		ret_val = IRQ_HANDLED;
   2658
   2659		/* clear TDC bit */
   2660		writel(AMD_BIT(UDC_EPSTS_TDC),
   2661				&dev->ep[UDC_EP0IN_IX].regs->sts);
   2662
   2663	/* status reg has IN bit set ? */
   2664	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
   2665		ret_val = IRQ_HANDLED;
   2666
   2667		if (ep->dma) {
   2668			/* clear IN bit */
   2669			writel(AMD_BIT(UDC_EPSTS_IN),
   2670				&dev->ep[UDC_EP0IN_IX].regs->sts);
   2671		}
   2672		if (dev->stall_ep0in) {
   2673			DBG(dev, "stall ep0in\n");
   2674			/* halt ep0in */
   2675			tmp = readl(&ep->regs->ctl);
   2676			tmp |= AMD_BIT(UDC_EPCTL_S);
   2677			writel(tmp, &ep->regs->ctl);
   2678		} else {
   2679			if (!list_empty(&ep->queue)) {
   2680				/* next request */
   2681				req = list_entry(ep->queue.next,
   2682						struct udc_request, queue);
   2683
   2684				if (ep->dma) {
   2685					/* write desc pointer */
   2686					writel(req->td_phys, &ep->regs->desptr);
   2687					/* set HOST READY */
   2688					req->td_data->status =
   2689						AMD_ADDBITS(
   2690						req->td_data->status,
   2691						UDC_DMA_STP_STS_BS_HOST_READY,
   2692						UDC_DMA_STP_STS_BS);
   2693
   2694					/* set poll demand bit */
   2695					tmp =
   2696					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
   2697					tmp |= AMD_BIT(UDC_EPCTL_P);
   2698					writel(tmp,
   2699					&dev->ep[UDC_EP0IN_IX].regs->ctl);
   2700
   2701					/* all bytes will be transferred */
   2702					req->req.actual = req->req.length;
   2703
   2704					/* complete req */
   2705					complete_req(ep, req, 0);
   2706
   2707				} else {
   2708					/* write fifo */
   2709					udc_txfifo_write(ep, &req->req);
   2710
					/* length bytes transferred */
   2712					len = req->req.length - req->req.actual;
   2713					if (len > ep->ep.maxpacket)
   2714						len = ep->ep.maxpacket;
   2715
   2716					req->req.actual += len;
   2717					if (req->req.actual == req->req.length
   2718						|| (len != ep->ep.maxpacket)) {
   2719						/* complete req */
   2720						complete_req(ep, req, 0);
   2721					}
   2722				}
   2723
   2724			}
   2725		}
   2726		ep->halted = 0;
   2727		dev->stall_ep0in = 0;
   2728		if (!ep->dma) {
   2729			/* clear IN bit */
   2730			writel(AMD_BIT(UDC_EPSTS_IN),
   2731				&dev->ep[UDC_EP0IN_IX].regs->sts);
   2732		}
   2733	}
   2734
   2735	return ret_val;
   2736}
   2737
   2738
   2739/* Interrupt handler for global device events */
   2740static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
   2741__releases(dev->lock)
   2742__acquires(dev->lock)
   2743{
   2744	irqreturn_t ret_val = IRQ_NONE;
   2745	u32 tmp;
   2746	u32 cfg;
   2747	struct udc_ep *ep;
   2748	u16 i;
   2749	u8 udc_csr_epix;
   2750
   2751	/* SET_CONFIG irq ? */
   2752	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
   2753		ret_val = IRQ_HANDLED;
   2754
   2755		/* read config value */
   2756		tmp = readl(&dev->regs->sts);
   2757		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
   2758		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
   2759		dev->cur_config = cfg;
   2760		dev->set_cfg_not_acked = 1;
   2761
   2762		/* make usb request for gadget driver */
   2763		memset(&setup_data, 0 , sizeof(union udc_setup_data));
   2764		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
   2765		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
   2766
		/* program the NE registers */
   2768		for (i = 0; i < UDC_EP_NUM; i++) {
   2769			ep = &dev->ep[i];
   2770			if (ep->in) {
   2771
   2772				/* ep ix in UDC CSR register space */
   2773				udc_csr_epix = ep->num;
   2774
   2775
   2776			/* OUT ep */
   2777			} else {
   2778				/* ep ix in UDC CSR register space */
   2779				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
   2780			}
   2781
   2782			tmp = readl(&dev->csr->ne[udc_csr_epix]);
   2783			/* ep cfg */
   2784			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
   2785						UDC_CSR_NE_CFG);
   2786			/* write reg */
   2787			writel(tmp, &dev->csr->ne[udc_csr_epix]);
   2788
   2789			/* clear stall bits */
   2790			ep->halted = 0;
   2791			tmp = readl(&ep->regs->ctl);
   2792			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
   2793			writel(tmp, &ep->regs->ctl);
   2794		}
		/* call the gadget setup handler with the fabricated request */
   2796		spin_unlock(&dev->lock);
   2797		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
   2798		spin_lock(&dev->lock);
   2799
   2800	} /* SET_INTERFACE ? */
   2801	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
   2802		ret_val = IRQ_HANDLED;
   2803
   2804		dev->set_cfg_not_acked = 1;
   2805		/* read interface and alt setting values */
   2806		tmp = readl(&dev->regs->sts);
   2807		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
   2808		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
   2809
   2810		/* make usb request for gadget driver */
   2811		memset(&setup_data, 0 , sizeof(union udc_setup_data));
   2812		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
   2813		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
   2814		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
   2815		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
   2816
   2817		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
   2818				dev->cur_alt, dev->cur_intf);
   2819
		/* program the NE registers */
   2821		for (i = 0; i < UDC_EP_NUM; i++) {
   2822			ep = &dev->ep[i];
   2823			if (ep->in) {
   2824
   2825				/* ep ix in UDC CSR register space */
   2826				udc_csr_epix = ep->num;
   2827
   2828
   2829			/* OUT ep */
   2830			} else {
   2831				/* ep ix in UDC CSR register space */
   2832				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
   2833			}
   2834
   2835			/* UDC CSR reg */
   2836			/* set ep values */
   2837			tmp = readl(&dev->csr->ne[udc_csr_epix]);
   2838			/* ep interface */
   2839			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
   2840						UDC_CSR_NE_INTF);
   2842			/* ep alt */
   2843			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
   2844						UDC_CSR_NE_ALT);
   2845			/* write reg */
   2846			writel(tmp, &dev->csr->ne[udc_csr_epix]);
   2847
   2848			/* clear stall bits */
   2849			ep->halted = 0;
   2850			tmp = readl(&ep->regs->ctl);
   2851			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
   2852			writel(tmp, &ep->regs->ctl);
   2853		}
   2854
		/* call the gadget setup handler with the fabricated request */
   2856		spin_unlock(&dev->lock);
   2857		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
   2858		spin_lock(&dev->lock);
   2859
   2860	} /* USB reset */
   2861	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
   2862		DBG(dev, "USB Reset interrupt\n");
   2863		ret_val = IRQ_HANDLED;
   2864
   2865		/* allow soft reset when suspend occurs */
   2866		soft_reset_occured = 0;
   2867
   2868		dev->waiting_zlp_ack_ep0in = 0;
   2869		dev->set_cfg_not_acked = 0;
   2870
   2871		/* mask not needed interrupts */
   2872		udc_mask_unused_interrupts(dev);
   2873
   2874		/* call gadget to resume and reset configs etc. */
   2875		spin_unlock(&dev->lock);
   2876		if (dev->sys_suspended && dev->driver->resume) {
   2877			dev->driver->resume(&dev->gadget);
   2878			dev->sys_suspended = 0;
   2879		}
   2880		usb_gadget_udc_reset(&dev->gadget, dev->driver);
   2881		spin_lock(&dev->lock);
   2882
   2883		/* disable ep0 to empty req queue */
   2884		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
   2885		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
   2886
   2887		/* soft reset when rxfifo not empty */
   2888		tmp = readl(&dev->regs->sts);
   2889		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
   2890				&& !soft_reset_after_usbreset_occured) {
   2891			udc_soft_reset(dev);
   2892			soft_reset_after_usbreset_occured++;
   2893		}
   2894
		/*
		 * DMA reset to clear a potential stale DMA hw hang;
		 * the POLL bit was already reset by ep_init() through
		 * disconnect()
		 */
   2900		DBG(dev, "DMA machine reset\n");
   2901		tmp = readl(&dev->regs->cfg);
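		/* pulse the DMARST bit: set it, then restore the old cfg */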
   2902		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
   2903		writel(tmp, &dev->regs->cfg);
   2904
   2905		/* put into initial config */
   2906		udc_basic_init(dev);
   2907
   2908		/* enable device setup interrupts */
   2909		udc_enable_dev_setup_interrupts(dev);
   2910
   2911		/* enable suspend interrupt */
   2912		tmp = readl(&dev->regs->irqmsk);
   2913		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
   2914		writel(tmp, &dev->regs->irqmsk);
   2915
   2916	} /* USB suspend */
   2917	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
   2918		DBG(dev, "USB Suspend interrupt\n");
   2919		ret_val = IRQ_HANDLED;
   2920		if (dev->driver->suspend) {
   2921			spin_unlock(&dev->lock);
   2922			dev->sys_suspended = 1;
   2923			dev->driver->suspend(&dev->gadget);
   2924			spin_lock(&dev->lock);
   2925		}
   2926	} /* new speed ? */
   2927	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
   2928		DBG(dev, "ENUM interrupt\n");
   2929		ret_val = IRQ_HANDLED;
   2930		soft_reset_after_usbreset_occured = 0;
   2931
   2932		/* disable ep0 to empty req queue */
   2933		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
   2934		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
   2935
   2936		/* link up all endpoints */
   2937		udc_setup_endpoints(dev);
   2938		dev_info(dev->dev, "Connect: %s\n",
   2939			 usb_speed_string(dev->gadget.speed));
   2940
   2941		/* init ep 0 */
   2942		activate_control_endpoints(dev);
   2943
   2944		/* enable ep0 interrupts */
   2945		udc_enable_ep0_interrupts(dev);
   2946	}
   2947	/* session valid change interrupt */
   2948	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
   2949		DBG(dev, "USB SVC interrupt\n");
   2950		ret_val = IRQ_HANDLED;
   2951
		/* check whether the session became invalid, i.e. disconnect */
   2953		tmp = readl(&dev->regs->sts);
   2954		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
   2955			/* disable suspend interrupt */
   2956			tmp = readl(&dev->regs->irqmsk);
   2957			tmp |= AMD_BIT(UDC_DEVINT_US);
   2958			writel(tmp, &dev->regs->irqmsk);
   2959			DBG(dev, "USB Disconnect (session valid low)\n");
   2960			/* cleanup on disconnect */
   2961			usb_disconnect(udc);
   2962		}
   2963
   2964	}
   2965
   2966	return ret_val;
   2967}
   2968
   2969/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
   2970irqreturn_t udc_irq(int irq, void *pdev)
   2971{
   2972	struct udc *dev = pdev;
   2973	u32 reg;
   2974	u16 i;
   2975	u32 ep_irq;
   2976	irqreturn_t ret_val = IRQ_NONE;
   2977
   2978	spin_lock(&dev->lock);
   2979
   2980	/* check for ep irq */
   2981	reg = readl(&dev->regs->ep_irqsts);
   2982	if (reg) {
   2983		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
   2984			ret_val |= udc_control_out_isr(dev);
   2985		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
   2986			ret_val |= udc_control_in_isr(dev);
   2987
   2988		/*
   2989		 * data endpoint
   2990		 * iterate ep's
   2991		 */
   2992		for (i = 1; i < UDC_EP_NUM; i++) {
   2993			ep_irq = 1 << i;
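			/* one irq status bit per endpoint */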
   2994			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
   2995				continue;
   2996
   2997			/* clear irq status */
   2998			writel(ep_irq, &dev->regs->ep_irqsts);
   2999
   3000			/* irq for out ep ? */
   3001			if (i > UDC_EPIN_NUM)
   3002				ret_val |= udc_data_out_isr(dev, i);
   3003			else
   3004				ret_val |= udc_data_in_isr(dev, i);
   3005		}
   3006
   3007	}
   3008
   3009
   3010	/* check for dev irq */
   3011	reg = readl(&dev->regs->irqsts);
   3012	if (reg) {
   3013		/* clear irq */
   3014		writel(reg, &dev->regs->irqsts);
   3015		ret_val |= udc_dev_isr(dev, reg);
   3016	}
   3017
   3018
   3019	spin_unlock(&dev->lock);
   3020	return ret_val;
   3021}
   3022EXPORT_SYMBOL_GPL(udc_irq);
   3023
   3024/* Tears down device */
   3025void gadget_release(struct device *pdev)
   3026{
   3027	struct amd5536udc *dev = dev_get_drvdata(pdev);
   3028	kfree(dev);
   3029}
   3030EXPORT_SYMBOL_GPL(gadget_release);
   3031
   3032/* Cleanup on device remove */
   3033void udc_remove(struct udc *dev)
   3034{
   3035	/* remove timer */
   3036	stop_timer++;
   3037	if (timer_pending(&udc_timer))
   3038		wait_for_completion(&on_exit);
   3039	del_timer_sync(&udc_timer);
   3040	/* remove pollstall timer */
   3041	stop_pollstall_timer++;
   3042	if (timer_pending(&udc_pollstall_timer))
   3043		wait_for_completion(&on_pollstall_exit);
   3044	del_timer_sync(&udc_pollstall_timer);
   3045	udc = NULL;
   3046}
   3047EXPORT_SYMBOL_GPL(udc_remove);
   3048
   3049/* free all the dma pools */
   3050void free_dma_pools(struct udc *dev)
   3051{
   3052	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
   3053		      dev->ep[UDC_EP0OUT_IX].td_phys);
   3054	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
   3055		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
   3056	dma_pool_destroy(dev->stp_requests);
   3057	dma_pool_destroy(dev->data_requests);
   3058}
   3059EXPORT_SYMBOL_GPL(free_dma_pools);
   3060
   3061/* create dma pools on init */
   3062int init_dma_pools(struct udc *dev)
   3063{
   3064	struct udc_stp_dma	*td_stp;
   3065	struct udc_data_dma	*td_data;
   3066	int retval;
   3067
	/* enforce a consistent DMA mode setting */
   3069	if (use_dma_ppb) {
   3070		use_dma_bufferfill_mode = 0;
   3071	} else {
   3072		use_dma_ppb_du = 0;
   3073		use_dma_bufferfill_mode = 1;
   3074	}
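	/*
	 * the result is either buffer-fill mode or packet-per-buffer
	 * mode, the latter optionally with descriptor update
	 */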
   3075
   3076	/* DMA setup */
   3077	dev->data_requests = dma_pool_create("data_requests", dev->dev,
   3078		sizeof(struct udc_data_dma), 0, 0);
   3079	if (!dev->data_requests) {
   3080		DBG(dev, "can't get request data pool\n");
   3081		return -ENOMEM;
   3082	}
   3083
   3084	/* EP0 in dma regs = dev control regs */
   3085	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
   3086
   3087	/* dma desc for setup data */
   3088	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
   3089		sizeof(struct udc_stp_dma), 0, 0);
   3090	if (!dev->stp_requests) {
   3091		DBG(dev, "can't get stp request pool\n");
   3092		retval = -ENOMEM;
   3093		goto err_create_dma_pool;
   3094	}
   3095	/* setup */
   3096	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
   3097				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
   3098	if (!td_stp) {
   3099		retval = -ENOMEM;
   3100		goto err_alloc_dma;
   3101	}
   3102	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
   3103
	/* data descriptor, used for 0-byte packets */
   3105	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
   3106				&dev->ep[UDC_EP0OUT_IX].td_phys);
   3107	if (!td_data) {
   3108		retval = -ENOMEM;
   3109		goto err_alloc_phys;
   3110	}
   3111	dev->ep[UDC_EP0OUT_IX].td = td_data;
   3112	return 0;
   3113
   3114err_alloc_phys:
   3115	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
   3116		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
   3117err_alloc_dma:
   3118	dma_pool_destroy(dev->stp_requests);
   3119	dev->stp_requests = NULL;
   3120err_create_dma_pool:
   3121	dma_pool_destroy(dev->data_requests);
   3122	dev->data_requests = NULL;
   3123	return retval;
   3124}
   3125EXPORT_SYMBOL_GPL(init_dma_pools);
   3126
   3127/* general probe */
   3128int udc_probe(struct udc *dev)
   3129{
   3130	char		tmp[128];
   3131	u32		reg;
   3132	int		retval;
   3133
   3134	/* device struct setup */
   3135	dev->gadget.ops = &udc_ops;
   3136
   3137	dev_set_name(&dev->gadget.dev, "gadget");
   3138	dev->gadget.name = name;
   3139	dev->gadget.max_speed = USB_SPEED_HIGH;
   3140
   3141	/* init registers, interrupts, ... */
   3142	startup_registers(dev);
   3143
   3144	dev_info(dev->dev, "%s\n", mod_desc);
   3145
   3146	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
   3147
	/* Print this device info for AMD chips only */
   3149	if (dev->chiprev == UDC_HSA0_REV ||
   3150	    dev->chiprev == UDC_HSB1_REV) {
		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
   3152			 tmp, dev->phys_addr, dev->chiprev,
   3153			 (dev->chiprev == UDC_HSA0_REV) ?
   3154			 "A0" : "B1");
   3155		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
   3156		if (dev->chiprev == UDC_HSA0_REV) {
   3157			dev_err(dev->dev, "chip revision is A0; too old\n");
   3158			retval = -ENODEV;
   3159			goto finished;
   3160		}
   3161		dev_info(dev->dev,
			 "driver version: %s (for Geode5536 B1)\n", tmp);
   3163	}
   3164
   3165	udc = dev;
   3166
   3167	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
   3168					    gadget_release);
   3169	if (retval)
   3170		goto finished;
   3171
   3172	/* timer init */
   3173	timer_setup(&udc_timer, udc_timer_function, 0);
   3174	timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
   3175
   3176	/* set SD */
   3177	reg = readl(&dev->regs->ctl);
   3178	reg |= AMD_BIT(UDC_DEVCTL_SD);
   3179	writel(reg, &dev->regs->ctl);
   3180
   3181	/* print dev register info */
   3182	print_regs(dev);
   3183
   3184	return 0;
   3185
   3186finished:
   3187	return retval;
   3188}
   3189EXPORT_SYMBOL_GPL(udc_probe);
   3190
   3191MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
   3192MODULE_AUTHOR("Thomas Dahlmann");
   3193MODULE_LICENSE("GPL");