cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

bcm63xx_udc.c (66755B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
      4 *
      5 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
      6 * Copyright (C) 2012 Broadcom Corporation
      7 */
      8
      9#include <linux/bitops.h>
     10#include <linux/bug.h>
     11#include <linux/clk.h>
     12#include <linux/compiler.h>
     13#include <linux/debugfs.h>
     14#include <linux/delay.h>
     15#include <linux/device.h>
     16#include <linux/dma-mapping.h>
     17#include <linux/errno.h>
     18#include <linux/interrupt.h>
     19#include <linux/ioport.h>
     20#include <linux/kernel.h>
     21#include <linux/list.h>
     22#include <linux/module.h>
     23#include <linux/moduleparam.h>
     24#include <linux/platform_device.h>
     25#include <linux/sched.h>
     26#include <linux/seq_file.h>
     27#include <linux/slab.h>
     28#include <linux/timer.h>
     29#include <linux/usb.h>
     30#include <linux/usb/ch9.h>
     31#include <linux/usb/gadget.h>
     32#include <linux/workqueue.h>
     33
     34#include <bcm63xx_cpu.h>
     35#include <bcm63xx_iudma.h>
     36#include <bcm63xx_dev_usb_usbd.h>
     37#include <bcm63xx_io.h>
     38#include <bcm63xx_regs.h>
     39
     40#define DRV_MODULE_NAME		"bcm63xx_udc"
     41
/* Name reported to the gadget layer for the bidirectional control endpoint */
static const char bcm63xx_ep0name[] = "ep0";

/* Static name/capability table for the five hardware endpoints */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
     67
/* Module parameter: force the link to full speed only (read-only at runtime) */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
     94
/* Hardware topology: 5 endpoints (including ep0) served by 6 IUDMA channels */
#define BCM63XX_NUM_EP			5
#define BCM63XX_NUM_IUDMA		6
#define BCM63XX_NUM_FIFO_PAIRS		3

/* how long (in us) to poll for a channel to stop in iudma_reset_channel() */
#define IUDMA_RESET_TIMEOUT_US		10000

/* ep0 is bidirectional, so it owns two IUDMA channels (RX and TX) */
#define IUDMA_EP0_RXCHAN		0
#define IUDMA_EP0_TXCHAN		1

/* largest number of data bytes carried by a single buffer descriptor */
#define IUDMA_MAX_FRAGMENT		2048
#define BCM63XX_MAX_CTRL_PKT		64

/* endpoint type encodings used in the hardware typemap/CSR registers */
#define BCMEP_CTRL			0x00
#define BCMEP_ISOC			0x01
#define BCMEP_BULK			0x02
#define BCMEP_INTR			0x03

/* endpoint direction encodings */
#define BCMEP_OUT			0x00
#define BCMEP_IN			0x01

#define BCM63XX_SPD_FULL		1
#define BCM63XX_SPD_HIGH		0

/* offsets of the DMAC/DMAS register blocks within the IUDMA region */
#define IUDMA_DMAC_OFFSET		0x200
#define IUDMA_DMAS_OFFSET		0x400
    120
/* ep0 state machine states; labels live in bcm63xx_ep0_state_names[] */
enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
    132
/* printable labels; entry order must match enum bcm63xx_ep0_state exactly */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
    144
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number (-1 for the ep0 RX channel, which has no
 *          logical endpoint of its own).
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt); BCMEP_* encoding.
 * @dir: Direction (in, out); BCMEP_IN / BCMEP_OUT.
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int				ep_num;
	int				n_bds;
	int				ep_type;
	int				dir;
	int				n_fifo_slots;
	int				max_pkt_hs;
	int				max_pkt_fs;
};
    164
/* Per-channel defaults; even indices are RX channels, odd indices are TX. */
static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx      |  n_bds     |         dir       |  max_pkt_hs  |
	 |       |    |       |          |        |      |       |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
    183
    184struct bcm63xx_udc;
    185
/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called (always true for
 *           ep0's channels).
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int			ch_idx;
	int				ep_num;
	bool				enabled;
	int				max_pkt;
	bool				is_tx;
	struct bcm63xx_ep		*bep;
	struct bcm63xx_udc		*udc;

	struct bcm_enet_desc		*read_bd;
	struct bcm_enet_desc		*write_bd;
	struct bcm_enet_desc		*end_bd;
	int				n_bds_used;

	struct bcm_enet_desc		*bd_ring;
	dma_addr_t			bd_ring_dma;
	unsigned int			n_bds;
};
    228
/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state (the sole channel for bulk/intr
 *         endpoints; the TX channel for ep0).
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int			ep_num;
	struct iudma_ch			*iudma;
	struct usb_ep			ep;
	struct bcm63xx_udc		*udc;
	struct list_head		queue;
	unsigned			halted:1;
};
    246
/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request (set when the request is
 *         handed to iudma_write()).
 */
struct bcm63xx_req {
	struct list_head		queue;		/* ep's requests */
	struct usb_request		req;
	unsigned int			offset;
	unsigned int			bd_bytes;
	struct iudma_ch			*iudma;
};
    262
/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine (enum bcm63xx_ep0_state).
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;
};
    330
    331static const struct usb_ep_ops bcm63xx_udc_ep_ops;
    332
    333/***********************************************************************
    334 * Convenience functions
    335 ***********************************************************************/
    336
/* Map a struct usb_gadget back to its containing bcm63xx_udc. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}
    341
/* Map a struct usb_ep back to its containing bcm63xx_ep. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}
    346
/* Map a struct usb_request back to its containing bcm63xx_req. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
    351
/* Read a 32-bit register in the USBD block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}
    356
/* Write a 32-bit register in the USBD block. */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}
    361
/* Read a 32-bit register in the global IUDMA block. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}
    366
/* Write a 32-bit register in the global IUDMA block. */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}
    371
/* Read a per-channel register in the IUDMA channel-config (DMAC) block. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
    377
/* Write a per-channel register in the IUDMA channel-config (DMAC) block. */
static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
    384
/* Read a per-channel register in the IUDMA state-RAM (DMAS) block. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
    390
/* Write a per-channel register in the IUDMA state-RAM (DMAS) block. */
static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}
    397
/* Gate both USB block clocks on or off.  Host clock is enabled first and
 * disabled last; the udelay gives the hardware time to settle before any
 * register access. */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		/* disable in reverse order of enabling */
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
    409
    410/***********************************************************************
    411 * Low-level IUDMA / FIFO operations
    412 ***********************************************************************/
    413
    414/**
    415 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
    416 * @udc: Reference to the device controller.
    417 * @idx: Desired init_sel value.
    418 *
    419 * The "init_sel" signal is used as a selection index for both endpoints
    420 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
    421 * depends on the context.
    422 */
    423static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
    424{
    425	u32 val = usbd_readl(udc, USBD_CONTROL_REG);
    426
    427	val &= ~USBD_CONTROL_INIT_SEL_MASK;
    428	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
    429	usbd_writel(udc, val, USBD_CONTROL_REG);
    430}
    431
    432/**
    433 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
    434 * @udc: Reference to the device controller.
    435 * @bep: Endpoint on which to operate.
    436 * @is_stalled: true to enable stall, false to disable.
    437 *
    438 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
    439 * halt/stall conditions.
    440 */
    441static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
    442	bool is_stalled)
    443{
    444	u32 val;
    445
    446	val = USBD_STALL_UPDATE_MASK |
    447		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
    448		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
    449	usbd_writel(udc, val, USBD_STALL_REG);
    450}
    451
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		/* even channel is the RX half of the pair, odd is the TX half */
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* carve out this pair's RX slot range, then advance the cursor */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		/* same carving for the TX side of the pair */
		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* read back, presumably to flush the posted writes — confirm */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
    493
    494/**
    495 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
    496 * @udc: Reference to the device controller.
    497 * @ep_num: Endpoint number.
    498 */
    499static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
    500{
    501	u32 val;
    502
    503	bcm63xx_ep_dma_select(udc, ep_num);
    504
    505	val = usbd_readl(udc, USBD_CONTROL_REG);
    506	val |= USBD_CONTROL_FIFO_RESET_MASK;
    507	usbd_writel(udc, val, USBD_CONTROL_REG);
    508	usbd_readl(udc, USBD_CONTROL_REG);
    509}
    510
    511/**
    512 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
    513 * @udc: Reference to the device controller.
    514 */
    515static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
    516{
    517	int i;
    518
    519	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
    520		bcm63xx_fifo_reset_ep(udc, i);
    521}
    522
    523/**
    524 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
    525 * @udc: Reference to the device controller.
    526 */
    527static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
    528{
    529	u32 i, val;
    530
    531	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
    532		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
    533
    534		if (cfg->ep_num < 0)
    535			continue;
    536
    537		bcm63xx_ep_dma_select(udc, cfg->ep_num);
    538		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
    539			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
    540		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
    541	}
    542}
    543
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		/* pick the packet size matching the current link speed */
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* ep0 RX (ep_num == -1) has no usb_ep or CSR entry */
		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		/* pack the endpoint's full CSR descriptor in one register */
		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
    578
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	/* uncoalesced RX: cap each BD at one packet so every packet IRQs */
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* nonzero exact multiple of max_pkt: append a ZLP if requested */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance the write pointer, wrapping at the end of the ring */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet: length 1 plus the USB_ZERO flag */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/* RX: always one BD.  TX: stop on ring full or final bytes. */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();	/* address must be visible before OWNER is handed over */
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel so it starts processing the new descriptors */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
    658
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if nothing was queued.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	/* nothing outstanding on this channel */
	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* HW still owns this BD; the transfer hasn't finished yet */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		/* walk the ring, wrapping past the final descriptor */
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	/* every queued BD completed: retire them all */
	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
    696
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Halts the channel, clears all BDs, then reprograms IRQ mask, burst size
 * and ring base so the channel is ready for reuse.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* RX: flush the FIFO up front (ep_num may be -1 for ep0 RX) */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
				   ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through the timeout, escalate to a forcible halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	/* acknowledge any pending per-channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();	/* BD clears must be visible before the ring is reused */

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
    752
    753/**
    754 * iudma_init_channel - One-time IUDMA channel initialization.
    755 * @udc: Reference to the device controller.
    756 * @ch_idx: Channel to initialize.
    757 */
    758static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
    759{
    760	struct iudma_ch *iudma = &udc->iudma[ch_idx];
    761	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
    762	unsigned int n_bds = cfg->n_bds;
    763	struct bcm63xx_ep *bep = NULL;
    764
    765	iudma->ep_num = cfg->ep_num;
    766	iudma->ch_idx = ch_idx;
    767	iudma->is_tx = !!(ch_idx & 0x01);
    768	if (iudma->ep_num >= 0) {
    769		bep = &udc->bep[iudma->ep_num];
    770		bep->iudma = iudma;
    771		INIT_LIST_HEAD(&bep->queue);
    772	}
    773
    774	iudma->bep = bep;
    775	iudma->udc = udc;
    776
    777	/* ep0 is always active; others are controlled by the gadget driver */
    778	if (iudma->ep_num <= 0)
    779		iudma->enabled = true;
    780
    781	iudma->n_bds = n_bds;
    782	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
    783		n_bds * sizeof(struct bcm_enet_desc),
    784		&iudma->bd_ring_dma, GFP_KERNEL);
    785	if (!iudma->bd_ring)
    786		return -ENOMEM;
    787	iudma->end_bd = &iudma->bd_ring[n_bds - 1];
    788
    789	return 0;
    790}
    791
    792/**
    793 * iudma_init - One-time initialization of all IUDMA channels.
    794 * @udc: Reference to the device controller.
    795 *
    796 * Enable DMA, flush channels, and enable global IUDMA IRQs.
    797 */
    798static int iudma_init(struct bcm63xx_udc *udc)
    799{
    800	int i, rc;
    801
    802	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
    803
    804	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
    805		rc = iudma_init_channel(udc, i);
    806		if (rc)
    807			return rc;
    808		iudma_reset_channel(udc, &udc->iudma[i]);
    809	}
    810
    811	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
    812	return 0;
    813}
    814
    815/**
    816 * iudma_uninit - Uninitialize IUDMA channels.
    817 * @udc: Reference to the device controller.
    818 *
    819 * Kill global IUDMA IRQs, flush channels, and kill DMA.
    820 */
    821static void iudma_uninit(struct bcm63xx_udc *udc)
    822{
    823	int i;
    824
    825	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
    826
    827	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
    828		iudma_reset_channel(udc, &udc->iudma[i]);
    829
    830	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
    831}
    832
    833/***********************************************************************
    834 * Other low-level USBD operations
    835 ***********************************************************************/
    836
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	/* the control-path events this driver cares about */
	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* clear any latched events regardless of the new mask */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
    856
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	/* device mode: take this port out of host mode and (initially)
	 * keep its line driver off; host mode: the reverse */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	/* route the port to the USBD block when in device mode */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
    898
    899/**
    900 * bcm63xx_select_pullup - Enable/disable the pullup on D+
    901 * @udc: Reference to the device controller.
    902 * @is_on: true to enable the pullup, false to disable.
    903 *
    904 * If the pullup is active, the host will sense a FS/HS device connected to
    905 * the port.  If the pullup is inactive, the host will think the USB
    906 * device has been disconnected.
    907 */
    908static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
    909{
    910	u32 val, portmask = BIT(udc->pd->port_no);
    911
    912	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
    913	if (is_on)
    914		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
    915	else
    916		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
    917	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
    918}
    919
    920/**
    921 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
    922 * @udc: Reference to the device controller.
    923 *
    924 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
    925 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
    926 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	/* iudma_uninit() touches controller registers, so briefly gate
	 * the clocks back on around it */
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	/* drop the clock references taken in bcm63xx_init_udc_hw() */
	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
    936
    937/**
    938 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
    939 * @udc: Reference to the device controller.
    940 */
    941static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
    942{
    943	int i, rc = 0;
    944	u32 val;
    945
    946	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
    947					 GFP_KERNEL);
    948	if (!udc->ep0_ctrl_buf)
    949		return -ENOMEM;
    950
    951	INIT_LIST_HEAD(&udc->gadget.ep_list);
    952	for (i = 0; i < BCM63XX_NUM_EP; i++) {
    953		struct bcm63xx_ep *bep = &udc->bep[i];
    954
    955		bep->ep.name = bcm63xx_ep_info[i].name;
    956		bep->ep.caps = bcm63xx_ep_info[i].caps;
    957		bep->ep_num = i;
    958		bep->ep.ops = &bcm63xx_udc_ep_ops;
    959		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
    960		bep->halted = 0;
    961		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
    962		bep->udc = udc;
    963		bep->ep.desc = NULL;
    964		INIT_LIST_HEAD(&bep->queue);
    965	}
    966
    967	udc->gadget.ep0 = &udc->bep[0].ep;
    968	list_del(&udc->bep[0].ep.ep_list);
    969
    970	udc->gadget.speed = USB_SPEED_UNKNOWN;
    971	udc->ep0state = EP0_SHUTDOWN;
    972
    973	udc->usbh_clk = clk_get(udc->dev, "usbh");
    974	if (IS_ERR(udc->usbh_clk))
    975		return -EIO;
    976
    977	udc->usbd_clk = clk_get(udc->dev, "usbd");
    978	if (IS_ERR(udc->usbd_clk)) {
    979		clk_put(udc->usbh_clk);
    980		return -EIO;
    981	}
    982
    983	set_clocks(udc, true);
    984
    985	val = USBD_CONTROL_AUTO_CSRS_MASK |
    986	      USBD_CONTROL_DONE_CSRS_MASK |
    987	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
    988	usbd_writel(udc, val, USBD_CONTROL_REG);
    989
    990	val = USBD_STRAPS_APP_SELF_PWR_MASK |
    991	      USBD_STRAPS_APP_RAM_IF_MASK |
    992	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
    993	      USBD_STRAPS_APP_8BITPHY_MASK |
    994	      USBD_STRAPS_APP_RMTWKUP_MASK;
    995
    996	if (udc->gadget.max_speed == USB_SPEED_HIGH)
    997		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
    998	else
    999		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
   1000	usbd_writel(udc, val, USBD_STRAPS_REG);
   1001
   1002	bcm63xx_set_ctrl_irqs(udc, false);
   1003
   1004	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
   1005
   1006	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
   1007	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
   1008	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
   1009
   1010	rc = iudma_init(udc);
   1011	set_clocks(udc, false);
   1012	if (rc)
   1013		bcm63xx_uninit_udc_hw(udc);
   1014
   1015	return 0;
   1016}
   1017
   1018/***********************************************************************
   1019 * Standard EP gadget operations
   1020 ***********************************************************************/
   1021
   1022/**
   1023 * bcm63xx_ep_enable - Enable one endpoint.
   1024 * @ep: Endpoint to enable.
   1025 * @desc: Contains max packet, direction, etc.
   1026 *
   1027 * Most of the endpoint parameters are fixed in this controller, so there
   1028 * isn't much for this function to do.
   1029 */
   1030static int bcm63xx_ep_enable(struct usb_ep *ep,
   1031	const struct usb_endpoint_descriptor *desc)
   1032{
   1033	struct bcm63xx_ep *bep = our_ep(ep);
   1034	struct bcm63xx_udc *udc = bep->udc;
   1035	struct iudma_ch *iudma = bep->iudma;
   1036	unsigned long flags;
   1037
   1038	if (!ep || !desc || ep->name == bcm63xx_ep0name)
   1039		return -EINVAL;
   1040
   1041	if (!udc->driver)
   1042		return -ESHUTDOWN;
   1043
   1044	spin_lock_irqsave(&udc->lock, flags);
   1045	if (iudma->enabled) {
   1046		spin_unlock_irqrestore(&udc->lock, flags);
   1047		return -EINVAL;
   1048	}
   1049
   1050	iudma->enabled = true;
   1051	BUG_ON(!list_empty(&bep->queue));
   1052
   1053	iudma_reset_channel(udc, iudma);
   1054
   1055	bep->halted = 0;
   1056	bcm63xx_set_stall(udc, bep, false);
   1057	clear_bit(bep->ep_num, &udc->wedgemap);
   1058
   1059	ep->desc = desc;
   1060	ep->maxpacket = usb_endpoint_maxp(desc);
   1061
   1062	spin_unlock_irqrestore(&udc->lock, flags);
   1063	return 0;
   1064}
   1065
   1066/**
   1067 * bcm63xx_ep_disable - Disable one endpoint.
   1068 * @ep: Endpoint to disable.
   1069 */
   1070static int bcm63xx_ep_disable(struct usb_ep *ep)
   1071{
   1072	struct bcm63xx_ep *bep = our_ep(ep);
   1073	struct bcm63xx_udc *udc = bep->udc;
   1074	struct iudma_ch *iudma = bep->iudma;
   1075	struct bcm63xx_req *breq, *n;
   1076	unsigned long flags;
   1077
   1078	if (!ep || !ep->desc)
   1079		return -EINVAL;
   1080
   1081	spin_lock_irqsave(&udc->lock, flags);
   1082	if (!iudma->enabled) {
   1083		spin_unlock_irqrestore(&udc->lock, flags);
   1084		return -EINVAL;
   1085	}
   1086	iudma->enabled = false;
   1087
   1088	iudma_reset_channel(udc, iudma);
   1089
   1090	if (!list_empty(&bep->queue)) {
   1091		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
   1092			usb_gadget_unmap_request(&udc->gadget, &breq->req,
   1093						 iudma->is_tx);
   1094			list_del(&breq->queue);
   1095			breq->req.status = -ESHUTDOWN;
   1096
   1097			spin_unlock_irqrestore(&udc->lock, flags);
   1098			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
   1099			spin_lock_irqsave(&udc->lock, flags);
   1100		}
   1101	}
   1102	ep->desc = NULL;
   1103
   1104	spin_unlock_irqrestore(&udc->lock, flags);
   1105	return 0;
   1106}
   1107
   1108/**
   1109 * bcm63xx_udc_alloc_request - Allocate a new request.
   1110 * @ep: Endpoint associated with the request.
   1111 * @mem_flags: Flags to pass to kzalloc().
   1112 */
   1113static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
   1114	gfp_t mem_flags)
   1115{
   1116	struct bcm63xx_req *breq;
   1117
   1118	breq = kzalloc(sizeof(*breq), mem_flags);
   1119	if (!breq)
   1120		return NULL;
   1121	return &breq->req;
   1122}
   1123
   1124/**
   1125 * bcm63xx_udc_free_request - Free a request.
   1126 * @ep: Endpoint associated with the request.
   1127 * @req: Request to free.
   1128 */
   1129static void bcm63xx_udc_free_request(struct usb_ep *ep,
   1130	struct usb_request *req)
   1131{
   1132	struct bcm63xx_req *breq = our_req(req);
   1133	kfree(breq);
   1134}
   1135
   1136/**
   1137 * bcm63xx_udc_queue - Queue up a new request.
   1138 * @ep: Endpoint associated with the request.
   1139 * @req: Request to add.
   1140 * @mem_flags: Unused.
   1141 *
   1142 * If the queue is empty, start this request immediately.  Otherwise, add
   1143 * it to the list.
   1144 *
   1145 * ep0 replies are sent through this function from the gadget driver, but
   1146 * they are treated differently because they need to be handled by the ep0
   1147 * state machine.  (Sometimes they are replies to control requests that
   1148 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
   1149 */
   1150static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
   1151	gfp_t mem_flags)
   1152{
   1153	struct bcm63xx_ep *bep = our_ep(ep);
   1154	struct bcm63xx_udc *udc = bep->udc;
   1155	struct bcm63xx_req *breq = our_req(req);
   1156	unsigned long flags;
   1157	int rc = 0;
   1158
   1159	if (unlikely(!req || !req->complete || !req->buf || !ep))
   1160		return -EINVAL;
   1161
   1162	req->actual = 0;
   1163	req->status = 0;
   1164	breq->offset = 0;
   1165
   1166	if (bep == &udc->bep[0]) {
   1167		/* only one reply per request, please */
   1168		if (udc->ep0_reply)
   1169			return -EINVAL;
   1170
   1171		udc->ep0_reply = req;
   1172		schedule_work(&udc->ep0_wq);
   1173		return 0;
   1174	}
   1175
   1176	spin_lock_irqsave(&udc->lock, flags);
   1177	if (!bep->iudma->enabled) {
   1178		rc = -ESHUTDOWN;
   1179		goto out;
   1180	}
   1181
   1182	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
   1183	if (rc == 0) {
   1184		list_add_tail(&breq->queue, &bep->queue);
   1185		if (list_is_singular(&bep->queue))
   1186			iudma_write(udc, bep->iudma, breq);
   1187	}
   1188
   1189out:
   1190	spin_unlock_irqrestore(&udc->lock, flags);
   1191	return rc;
   1192}
   1193
   1194/**
   1195 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
   1196 * @ep: Endpoint associated with the request.
   1197 * @req: Request to remove.
   1198 *
   1199 * If the request is not at the head of the queue, this is easy - just nuke
   1200 * it.  If the request is at the head of the queue, we'll need to stop the
   1201 * DMA transaction and then queue up the successor.
   1202 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	/* NOTE(review): req is unmapped even when it is not the head entry,
	 * and without verifying it is actually on this queue — confirm */
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* in-flight head request: stop DMA before unlinking it */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		/* restart the channel with the next pending request */
		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		/* not at the head: no DMA involvement, just unlink */
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/* NOTE(review): the completion callback fires with -ESHUTDOWN even
	 * when rc == -EINVAL (empty queue) — verify callers expect this */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
   1243
   1244/**
   1245 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
   1246 * @ep: Endpoint to halt.
   1247 * @value: Zero to clear halt; nonzero to set halt.
   1248 *
   1249 * See comments in bcm63xx_update_wedge().
   1250 */
   1251static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
   1252{
   1253	struct bcm63xx_ep *bep = our_ep(ep);
   1254	struct bcm63xx_udc *udc = bep->udc;
   1255	unsigned long flags;
   1256
   1257	spin_lock_irqsave(&udc->lock, flags);
   1258	bcm63xx_set_stall(udc, bep, !!value);
   1259	bep->halted = value;
   1260	spin_unlock_irqrestore(&udc->lock, flags);
   1261
   1262	return 0;
   1263}
   1264
   1265/**
   1266 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
   1267 * @ep: Endpoint to wedge.
   1268 *
   1269 * See comments in bcm63xx_update_wedge().
   1270 */
   1271static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
   1272{
   1273	struct bcm63xx_ep *bep = our_ep(ep);
   1274	struct bcm63xx_udc *udc = bep->udc;
   1275	unsigned long flags;
   1276
   1277	spin_lock_irqsave(&udc->lock, flags);
   1278	set_bit(bep->ep_num, &udc->wedgemap);
   1279	bcm63xx_set_stall(udc, bep, true);
   1280	spin_unlock_irqrestore(&udc->lock, flags);
   1281
   1282	return 0;
   1283}
   1284
/* per-endpoint operations handed to the gadget core */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
   1298
   1299/***********************************************************************
   1300 * EP0 handling
   1301 ***********************************************************************/
   1302
   1303/**
   1304 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
   1305 * @udc: Reference to the device controller.
   1306 * @ctrl: 8-byte SETUP request.
   1307 */
   1308static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
   1309	struct usb_ctrlrequest *ctrl)
   1310{
   1311	int rc;
   1312
   1313	spin_unlock_irq(&udc->lock);
   1314	rc = udc->driver->setup(&udc->gadget, ctrl);
   1315	spin_lock_irq(&udc->lock);
   1316	return rc;
   1317}
   1318
   1319/**
   1320 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
   1321 * @udc: Reference to the device controller.
   1322 *
   1323 * Many standard requests are handled automatically in the hardware, but
   1324 * we still need to pass them to the gadget driver so that it can
   1325 * reconfigure the interfaces/endpoints if necessary.
   1326 *
   1327 * Unfortunately we are not able to send a STALL response if the host
   1328 * requests an invalid configuration.  If this happens, we'll have to be
   1329 * content with printing a warning.
   1330 */
   1331static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
   1332{
   1333	struct usb_ctrlrequest ctrl;
   1334	int rc;
   1335
   1336	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
   1337	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
   1338	ctrl.wValue = cpu_to_le16(udc->cfg);
   1339	ctrl.wIndex = 0;
   1340	ctrl.wLength = 0;
   1341
   1342	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
   1343	if (rc < 0) {
   1344		dev_warn_ratelimited(udc->dev,
   1345			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
   1346			udc->cfg);
   1347	}
   1348	return rc;
   1349}
   1350
   1351/**
   1352 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
   1353 * @udc: Reference to the device controller.
   1354 */
   1355static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
   1356{
   1357	struct usb_ctrlrequest ctrl;
   1358	int rc;
   1359
   1360	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
   1361	ctrl.bRequest = USB_REQ_SET_INTERFACE;
   1362	ctrl.wValue = cpu_to_le16(udc->alt_iface);
   1363	ctrl.wIndex = cpu_to_le16(udc->iface);
   1364	ctrl.wLength = 0;
   1365
   1366	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
   1367	if (rc < 0) {
   1368		dev_warn_ratelimited(udc->dev,
   1369			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
   1370			udc->iface, udc->alt_iface);
   1371	}
   1372	return rc;
   1373}
   1374
   1375/**
   1376 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
   1377 * @udc: Reference to the device controller.
   1378 * @ch_idx: IUDMA channel number.
   1379 * @req: USB gadget layer representation of the request.
   1380 */
   1381static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
   1382	struct usb_request *req)
   1383{
   1384	struct bcm63xx_req *breq = our_req(req);
   1385	struct iudma_ch *iudma = &udc->iudma[ch_idx];
   1386
   1387	BUG_ON(udc->ep0_request);
   1388	udc->ep0_request = req;
   1389
   1390	req->actual = 0;
   1391	breq->offset = 0;
   1392	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
   1393	iudma_write(udc, iudma, breq);
   1394}
   1395
   1396/**
   1397 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
   1398 * @udc: Reference to the device controller.
   1399 * @req: USB gadget layer representation of the request.
   1400 * @status: Status to return to the gadget driver.
   1401 */
   1402static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
   1403	struct usb_request *req, int status)
   1404{
   1405	req->status = status;
   1406	if (status)
   1407		req->actual = 0;
   1408	if (req->complete) {
   1409		spin_unlock_irq(&udc->lock);
   1410		req->complete(&udc->bep[0].ep, req);
   1411		spin_lock_irq(&udc->lock);
   1412	}
   1413}
   1414
   1415/**
   1416 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
   1417 *   reset/shutdown.
   1418 * @udc: Reference to the device controller.
   1419 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
   1420 */
   1421static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
   1422{
   1423	struct usb_request *req = udc->ep0_reply;
   1424
   1425	udc->ep0_reply = NULL;
   1426	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
   1427	if (udc->ep0_request == req) {
   1428		udc->ep0_req_completed = 0;
   1429		udc->ep0_request = NULL;
   1430	}
   1431	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
   1432}
   1433
   1434/**
   1435 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
   1436 *   transfer len.
   1437 * @udc: Reference to the device controller.
   1438 */
   1439static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
   1440{
   1441	struct usb_request *req = udc->ep0_request;
   1442
   1443	udc->ep0_req_completed = 0;
   1444	udc->ep0_request = NULL;
   1445
   1446	return req->actual;
   1447}
   1448
   1449/**
   1450 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
   1451 * @udc: Reference to the device controller.
   1452 * @ch_idx: IUDMA channel number.
   1453 * @length: Number of bytes to TX/RX.
   1454 *
   1455 * Used for simple transfers performed by the ep0 worker.  This will always
   1456 * use ep0_ctrl_req / ep0_ctrl_buf.
   1457 */
   1458static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
   1459	int length)
   1460{
   1461	struct usb_request *req = &udc->ep0_ctrl_req.req;
   1462
   1463	req->buf = udc->ep0_ctrl_buf;
   1464	req->length = length;
   1465	req->complete = NULL;
   1466
   1467	bcm63xx_ep0_map_write(udc, ch_idx, req);
   1468}
   1469
   1470/**
   1471 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
   1472 * @udc: Reference to the device controller.
   1473 *
   1474 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
   1475 * for the next packet.  Anything else means the transaction requires multiple
   1476 * stages of handling.
   1477 */
   1478static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
   1479{
   1480	int rc;
   1481	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
   1482
   1483	rc = bcm63xx_ep0_read_complete(udc);
   1484
   1485	if (rc < 0) {
   1486		dev_err(udc->dev, "missing SETUP packet\n");
   1487		return EP0_IDLE;
   1488	}
   1489
   1490	/*
   1491	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
   1492	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
   1493	 * just throw it away.
   1494	 */
   1495	if (rc == 0)
   1496		return EP0_REQUEUE;
   1497
   1498	/* Drop malformed SETUP packets */
   1499	if (rc != sizeof(*ctrl)) {
   1500		dev_warn_ratelimited(udc->dev,
   1501			"malformed SETUP packet (%d bytes)\n", rc);
   1502		return EP0_REQUEUE;
   1503	}
   1504
   1505	/* Process new SETUP packet arriving on ep0 */
   1506	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
   1507	if (rc < 0) {
   1508		bcm63xx_set_stall(udc, &udc->bep[0], true);
   1509		return EP0_REQUEUE;
   1510	}
   1511
   1512	if (!ctrl->wLength)
   1513		return EP0_REQUEUE;
   1514	else if (ctrl->bRequestType & USB_DIR_IN)
   1515		return EP0_IN_DATA_PHASE_SETUP;
   1516	else
   1517		return EP0_OUT_DATA_PHASE_SETUP;
   1518}
   1519
   1520/**
   1521 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
   1522 * @udc: Reference to the device controller.
   1523 *
   1524 * In state EP0_IDLE, the RX descriptor is either pending, or has been
   1525 * filled with a SETUP packet from the host.  This function handles new
   1526 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
   1527 * and reset/shutdown events.
   1528 *
   1529 * Returns 0 if work was done; -EAGAIN if nothing to do.
   1530 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	/* the order of these checks establishes the event priority:
	 * reset first, then spoofed SETCFG/SETINTF, completed SETUP,
	 * shutdown, and finally stray replies */
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		/* a SETUP packet landed in the RX descriptor */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
   1571
   1572/**
   1573 * bcm63xx_ep0_one_round - Handle the current ep0 state.
   1574 * @udc: Reference to the device controller.
   1575 *
   1576 * Returns 0 if work was done; -EAGAIN if nothing to do.
   1577 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	/* a pending reset or shutdown aborts whatever phase is active */
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	/* no state transition means this round did no work */
	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}
   1715
   1716/**
   1717 * bcm63xx_ep0_process - ep0 worker thread / state machine.
   1718 * @w: Workqueue struct.
   1719 *
   1720 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
   1721 * is used to synchronize ep0 events and ensure that both HW and SW events
   1722 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
   1723 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
   1724 * by the USBD hardware.
   1725 *
   1726 * The worker function will continue iterating around the state machine
   1727 * until there is nothing left to do.  Usually "nothing left to do" means
   1728 * that we're waiting for a new event from the hardware.
   1729 */
   1730static void bcm63xx_ep0_process(struct work_struct *w)
   1731{
   1732	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
   1733	spin_lock_irq(&udc->lock);
   1734	while (bcm63xx_ep0_one_round(udc) == 0)
   1735		;
   1736	spin_unlock_irq(&udc->lock);
   1737}
   1738
   1739/***********************************************************************
   1740 * Standard UDC gadget operations
   1741 ***********************************************************************/
   1742
   1743/**
   1744 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
   1745 * @gadget: USB device.
   1746 */
   1747static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
   1748{
   1749	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
   1750
   1751	return (usbd_readl(udc, USBD_STATUS_REG) &
   1752		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
   1753}
   1754
   1755/**
   1756 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
   1757 * @gadget: USB device.
   1758 * @is_on: 0 to disable pullup, 1 to enable.
   1759 *
   1760 * See notes in bcm63xx_select_pullup().
   1761 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		/* bring the controller out of shutdown before exposing D+ */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* a (re)connect clears all wedges and stalls */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		/* drop D+ first so the host sees a disconnect */
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		/* ask the ep0 worker to wind itself down */
		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		/*
		 * NOTE(review): ep0state is polled here without the lock;
		 * the worker issues an mb() before setting EP0_SHUTDOWN
		 * (see bcm63xx_ep0_do_idle) — confirm this ordering is
		 * sufficient on all supported cores.
		 */
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
   1803
   1804/**
   1805 * bcm63xx_udc_start - Start the controller.
   1806 * @gadget: USB device.
   1807 * @driver: Driver for USB device.
   1808 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	/* this controller only accepts high-speed-capable gadget drivers */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* NOTE(review): set_clocks() runs under a spinlock here; confirm
	 * the clk operations it performs cannot sleep on this platform */
	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
   1840
   1841/**
   1842 * bcm63xx_udc_stop - Shut down the controller.
   1843 * @gadget: USB device.
   1844 * @driver: Driver for USB device.
   1845 */
   1846static int bcm63xx_udc_stop(struct usb_gadget *gadget)
   1847{
   1848	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
   1849	unsigned long flags;
   1850
   1851	spin_lock_irqsave(&udc->lock, flags);
   1852
   1853	udc->driver = NULL;
   1854
   1855	/*
   1856	 * If we switch the PHY too abruptly after dropping D+, the host
   1857	 * will often complain:
   1858	 *
   1859	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
   1860	 */
   1861	msleep(100);
   1862
   1863	bcm63xx_select_phy_mode(udc, false);
   1864	set_clocks(udc, false);
   1865
   1866	spin_unlock_irqrestore(&udc->lock, flags);
   1867
   1868	return 0;
   1869}
   1870
/* usb_gadget_ops dispatch table handed to the UDC core. */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
   1877
   1878/***********************************************************************
   1879 * IRQ handling
   1880 ***********************************************************************/
   1881
   1882/**
   1883 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
   1884 * @udc: Reference to the device controller.
   1885 *
   1886 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
   1887 * The driver never sees the raw control packets coming in on the ep0
   1888 * IUDMA channel, but at least we get an interrupt event to tell us that
   1889 * new values are waiting in the USBD_STATUS register.
   1890 */
   1891static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
   1892{
   1893	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
   1894
   1895	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
   1896	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
   1897	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
   1898			 USBD_STATUS_ALTINTF_SHIFT;
   1899	bcm63xx_ep_setup(udc);
   1900}
   1901
   1902/**
   1903 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
   1904 * @udc: Reference to the device controller.
   1905 *
   1906 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
   1907 * speed has changed, so that the caller can update the endpoint settings.
   1908 */
   1909static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
   1910{
   1911	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
   1912	enum usb_device_speed oldspeed = udc->gadget.speed;
   1913
   1914	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
   1915	case BCM63XX_SPD_HIGH:
   1916		udc->gadget.speed = USB_SPEED_HIGH;
   1917		break;
   1918	case BCM63XX_SPD_FULL:
   1919		udc->gadget.speed = USB_SPEED_FULL;
   1920		break;
   1921	default:
   1922		/* this should never happen */
   1923		udc->gadget.speed = USB_SPEED_UNKNOWN;
   1924		dev_err(udc->dev,
   1925			"received SETUP packet with invalid link speed\n");
   1926		return 0;
   1927	}
   1928
   1929	if (udc->gadget.speed != oldspeed) {
   1930		dev_info(udc->dev, "link up, %s-speed mode\n",
   1931			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
   1932		return 1;
   1933	} else {
   1934		return 0;
   1935	}
   1936}
   1937
   1938/**
   1939 * bcm63xx_update_wedge - Iterate through wedged endpoints.
   1940 * @udc: Reference to the device controller.
   1941 * @new_status: true to "refresh" wedge status; false to clear it.
   1942 *
   1943 * On a SETUP interrupt, we need to manually "refresh" the wedge status
   1944 * because the controller hardware is designed to automatically clear
   1945 * stalls in response to a CLEAR_FEATURE request from the host.
   1946 *
   1947 * On a RESET interrupt, we do want to restore all wedged endpoints.
   1948 */
   1949static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
   1950{
   1951	int i;
   1952
   1953	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
   1954		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
   1955		if (!new_status)
   1956			clear_bit(i, &udc->wedgemap);
   1957	}
   1958}
   1959
   1960/**
   1961 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
   1962 * @irq: IRQ number (unused).
   1963 * @dev_id: Reference to the device controller.
   1964 *
   1965 * This is where we handle link (VBUS) down, USB reset, speed changes,
   1966 * SET_CONFIGURATION, and SET_INTERFACE events.
   1967 */
   1968static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
   1969{
   1970	struct bcm63xx_udc *udc = dev_id;
   1971	u32 stat;
   1972	bool disconnected = false, bus_reset = false;
   1973
   1974	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
   1975	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
   1976
   1977	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
   1978
   1979	spin_lock(&udc->lock);
   1980	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
   1981		/* VBUS toggled */
   1982
   1983		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
   1984		      USBD_EVENTS_USB_LINK_MASK) &&
   1985		      udc->gadget.speed != USB_SPEED_UNKNOWN)
   1986			dev_info(udc->dev, "link down\n");
   1987
   1988		udc->gadget.speed = USB_SPEED_UNKNOWN;
   1989		disconnected = true;
   1990	}
   1991	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
   1992		bcm63xx_fifo_setup(udc);
   1993		bcm63xx_fifo_reset(udc);
   1994		bcm63xx_ep_setup(udc);
   1995
   1996		bcm63xx_update_wedge(udc, false);
   1997
   1998		udc->ep0_req_reset = 1;
   1999		schedule_work(&udc->ep0_wq);
   2000		bus_reset = true;
   2001	}
   2002	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
   2003		if (bcm63xx_update_link_speed(udc)) {
   2004			bcm63xx_fifo_setup(udc);
   2005			bcm63xx_ep_setup(udc);
   2006		}
   2007		bcm63xx_update_wedge(udc, true);
   2008	}
   2009	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
   2010		bcm63xx_update_cfg_iface(udc);
   2011		udc->ep0_req_set_cfg = 1;
   2012		schedule_work(&udc->ep0_wq);
   2013	}
   2014	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
   2015		bcm63xx_update_cfg_iface(udc);
   2016		udc->ep0_req_set_iface = 1;
   2017		schedule_work(&udc->ep0_wq);
   2018	}
   2019	spin_unlock(&udc->lock);
   2020
   2021	if (disconnected && udc->driver)
   2022		udc->driver->disconnect(&udc->gadget);
   2023	else if (bus_reset && udc->driver)
   2024		usb_gadget_udc_reset(&udc->gadget, udc->driver);
   2025
   2026	return IRQ_HANDLED;
   2027}
   2028
   2029/**
   2030 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
   2031 * @irq: IRQ number (unused).
   2032 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
   2033 *
   2034 * For the two ep0 channels, we have special handling that triggers the
   2035 * ep0 worker thread.  For normal bulk/intr channels, either queue up
   2036 * the next buffer descriptor for the transaction (incomplete transaction),
   2037 * or invoke the completion callback (complete transactions).
   2038 */
   2039static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
   2040{
   2041	struct iudma_ch *iudma = dev_id;
   2042	struct bcm63xx_udc *udc = iudma->udc;
   2043	struct bcm63xx_ep *bep;
   2044	struct usb_request *req = NULL;
   2045	struct bcm63xx_req *breq = NULL;
   2046	int rc;
   2047	bool is_done = false;
   2048
   2049	spin_lock(&udc->lock);
   2050
   2051	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
   2052			ENETDMAC_IR_REG, iudma->ch_idx);
   2053	bep = iudma->bep;
   2054	rc = iudma_read(udc, iudma);
   2055
   2056	/* special handling for EP0 RX (0) and TX (1) */
   2057	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
   2058	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
   2059		req = udc->ep0_request;
   2060		breq = our_req(req);
   2061
   2062		/* a single request could require multiple submissions */
   2063		if (rc >= 0) {
   2064			req->actual += rc;
   2065
   2066			if (req->actual >= req->length || breq->bd_bytes > rc) {
   2067				udc->ep0_req_completed = 1;
   2068				is_done = true;
   2069				schedule_work(&udc->ep0_wq);
   2070
   2071				/* "actual" on a ZLP is 1 byte */
   2072				req->actual = min(req->actual, req->length);
   2073			} else {
   2074				/* queue up the next BD (same request) */
   2075				iudma_write(udc, iudma, breq);
   2076			}
   2077		}
   2078	} else if (!list_empty(&bep->queue)) {
   2079		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
   2080		req = &breq->req;
   2081
   2082		if (rc >= 0) {
   2083			req->actual += rc;
   2084
   2085			if (req->actual >= req->length || breq->bd_bytes > rc) {
   2086				is_done = true;
   2087				list_del(&breq->queue);
   2088
   2089				req->actual = min(req->actual, req->length);
   2090
   2091				if (!list_empty(&bep->queue)) {
   2092					struct bcm63xx_req *next;
   2093
   2094					next = list_first_entry(&bep->queue,
   2095						struct bcm63xx_req, queue);
   2096					iudma_write(udc, iudma, next);
   2097				}
   2098			} else {
   2099				iudma_write(udc, iudma, breq);
   2100			}
   2101		}
   2102	}
   2103	spin_unlock(&udc->lock);
   2104
   2105	if (is_done) {
   2106		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
   2107		if (req->complete)
   2108			req->complete(&bep->ep, req);
   2109	}
   2110
   2111	return IRQ_HANDLED;
   2112}
   2113
   2114/***********************************************************************
   2115 * Debug filesystem
   2116 ***********************************************************************/
   2117
   2118/*
   2119 * bcm63xx_usbd_dbg_show - Show USBD controller state.
   2120 * @s: seq_file to which the information will be written.
   2121 * @p: Unused.
   2122 *
   2123 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
   2124 */
   2125static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
   2126{
   2127	struct bcm63xx_udc *udc = s->private;
   2128
   2129	if (!udc->driver)
   2130		return -ENODEV;
   2131
   2132	seq_printf(s, "ep0 state: %s\n",
   2133		   bcm63xx_ep0_state_names[udc->ep0state]);
   2134	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
   2135		   udc->ep0_req_reset ? "reset " : "",
   2136		   udc->ep0_req_set_cfg ? "set_cfg " : "",
   2137		   udc->ep0_req_set_iface ? "set_iface " : "",
   2138		   udc->ep0_req_shutdown ? "shutdown " : "",
   2139		   udc->ep0_request ? "pending " : "",
   2140		   udc->ep0_req_completed ? "completed " : "",
   2141		   udc->ep0_reply ? "reply " : "");
   2142	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
   2143		   udc->cfg, udc->iface, udc->alt_iface);
   2144	seq_printf(s, "regs:\n");
   2145	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
   2146		   usbd_readl(udc, USBD_CONTROL_REG),
   2147		   usbd_readl(udc, USBD_STRAPS_REG),
   2148		   usbd_readl(udc, USBD_STATUS_REG));
   2149	seq_printf(s, "  events:  %08x; stall:  %08x\n",
   2150		   usbd_readl(udc, USBD_EVENTS_REG),
   2151		   usbd_readl(udc, USBD_STALL_REG));
   2152
   2153	return 0;
   2154}
   2155DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
   2156
   2157/*
   2158 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
   2159 * @s: seq_file to which the information will be written.
   2160 * @p: Unused.
   2161 *
   2162 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
   2163 */
   2164static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
   2165{
   2166	struct bcm63xx_udc *udc = s->private;
   2167	int ch_idx, i;
   2168	u32 sram2, sram3;
   2169
   2170	if (!udc->driver)
   2171		return -ENODEV;
   2172
   2173	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
   2174		struct iudma_ch *iudma = &udc->iudma[ch_idx];
   2175		struct list_head *pos;
   2176
   2177		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
   2178		switch (iudma_defaults[ch_idx].ep_type) {
   2179		case BCMEP_CTRL:
   2180			seq_printf(s, "control");
   2181			break;
   2182		case BCMEP_BULK:
   2183			seq_printf(s, "bulk");
   2184			break;
   2185		case BCMEP_INTR:
   2186			seq_printf(s, "interrupt");
   2187			break;
   2188		}
   2189		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
   2190		seq_printf(s, " [ep%d]:\n",
   2191			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
   2192		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
   2193			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
   2194			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
   2195			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
   2196			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
   2197
   2198		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
   2199		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
   2200		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
   2201			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
   2202			   sram2 >> 16, sram2 & 0xffff,
   2203			   sram3 >> 16, sram3 & 0xffff,
   2204			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
   2205		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
   2206			   iudma->n_bds);
   2207
   2208		if (iudma->bep) {
   2209			i = 0;
   2210			list_for_each(pos, &iudma->bep->queue)
   2211				i++;
   2212			seq_printf(s, "; %d queued\n", i);
   2213		} else {
   2214			seq_printf(s, "\n");
   2215		}
   2216
   2217		for (i = 0; i < iudma->n_bds; i++) {
   2218			struct bcm_enet_desc *d = &iudma->bd_ring[i];
   2219
   2220			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
   2221				   i * sizeof(*d), i,
   2222				   d->len_stat >> 16, d->len_stat & 0xffff,
   2223				   d->address);
   2224			if (d == iudma->read_bd)
   2225				seq_printf(s, "   <<RD");
   2226			if (d == iudma->write_bd)
   2227				seq_printf(s, "   <<WR");
   2228			seq_printf(s, "\n");
   2229		}
   2230
   2231		seq_printf(s, "\n");
   2232	}
   2233
   2234	return 0;
   2235}
   2236DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
   2237
   2238/**
   2239 * bcm63xx_udc_init_debugfs - Create debugfs entries.
   2240 * @udc: Reference to the device controller.
   2241 */
   2242static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
   2243{
   2244	struct dentry *root;
   2245
   2246	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
   2247		return;
   2248
   2249	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
   2250	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
   2251	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
   2252}
   2253
   2254/**
   2255 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
   2256 * @udc: Reference to the device controller.
   2257 *
   2258 * debugfs_remove() is safe to call with a NULL argument.
   2259 */
   2260static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
   2261{
   2262	debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
   2263}
   2264
   2265/***********************************************************************
   2266 * Driver init/exit
   2267 ***********************************************************************/
   2268
   2269/**
   2270 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
   2271 * @pdev: Platform device struct from the bcm63xx BSP code.
   2272 *
   2273 * Note that platform data is required, because pd.port_no varies from chip
   2274 * to chip and is used to switch the correct USB port to device mode.
   2275 */
   2276static int bcm63xx_udc_probe(struct platform_device *pdev)
   2277{
   2278	struct device *dev = &pdev->dev;
   2279	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
   2280	struct bcm63xx_udc *udc;
   2281	int rc = -ENOMEM, i, irq;
   2282
   2283	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
   2284	if (!udc)
   2285		return -ENOMEM;
   2286
   2287	platform_set_drvdata(pdev, udc);
   2288	udc->dev = dev;
   2289	udc->pd = pd;
   2290
   2291	if (!pd) {
   2292		dev_err(dev, "missing platform data\n");
   2293		return -EINVAL;
   2294	}
   2295
   2296	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
   2297	if (IS_ERR(udc->usbd_regs))
   2298		return PTR_ERR(udc->usbd_regs);
   2299
   2300	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
   2301	if (IS_ERR(udc->iudma_regs))
   2302		return PTR_ERR(udc->iudma_regs);
   2303
   2304	spin_lock_init(&udc->lock);
   2305	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
   2306
   2307	udc->gadget.ops = &bcm63xx_udc_ops;
   2308	udc->gadget.name = dev_name(dev);
   2309
   2310	if (!pd->use_fullspeed && !use_fullspeed)
   2311		udc->gadget.max_speed = USB_SPEED_HIGH;
   2312	else
   2313		udc->gadget.max_speed = USB_SPEED_FULL;
   2314
   2315	/* request clocks, allocate buffers, and clear any pending IRQs */
   2316	rc = bcm63xx_init_udc_hw(udc);
   2317	if (rc)
   2318		return rc;
   2319
   2320	rc = -ENXIO;
   2321
   2322	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
   2323	irq = platform_get_irq(pdev, 0);
   2324	if (irq < 0) {
   2325		rc = irq;
   2326		goto out_uninit;
   2327	}
   2328	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
   2329			     dev_name(dev), udc) < 0)
   2330		goto report_request_failure;
   2331
   2332	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
   2333	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
   2334		irq = platform_get_irq(pdev, i + 1);
   2335		if (irq < 0) {
   2336			rc = irq;
   2337			goto out_uninit;
   2338		}
   2339		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
   2340				     dev_name(dev), &udc->iudma[i]) < 0)
   2341			goto report_request_failure;
   2342	}
   2343
   2344	bcm63xx_udc_init_debugfs(udc);
   2345	rc = usb_add_gadget_udc(dev, &udc->gadget);
   2346	if (!rc)
   2347		return 0;
   2348
   2349	bcm63xx_udc_cleanup_debugfs(udc);
   2350out_uninit:
   2351	bcm63xx_uninit_udc_hw(udc);
   2352	return rc;
   2353
   2354report_request_failure:
   2355	dev_err(dev, "error requesting IRQ #%d\n", irq);
   2356	goto out_uninit;
   2357}
   2358
   2359/**
   2360 * bcm63xx_udc_remove - Remove the device from the system.
   2361 * @pdev: Platform device struct from the bcm63xx BSP code.
   2362 */
   2363static int bcm63xx_udc_remove(struct platform_device *pdev)
   2364{
   2365	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
   2366
   2367	bcm63xx_udc_cleanup_debugfs(udc);
   2368	usb_del_gadget_udc(&udc->gadget);
   2369	BUG_ON(udc->driver);
   2370
   2371	bcm63xx_uninit_udc_hw(udc);
   2372
   2373	return 0;
   2374}
   2375
/* Binds to the "bcm63xx_udc" platform device registered by the BSP code. */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);