cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

epn.c (22940B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)					\
	do {							\
		if (!(expr)) EPDBG(ep, "CHECK:" fmt);		\
	} while (0)
#else
#define CHECK(ep, expr, fmt...)	do { } while (0)
#endif
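/*
 * Illustrative note: when EXTRA_CHECKS is defined, CHECK() logs through
 * EPDBG() whenever its condition is false; otherwise it compiles down to
 * an empty statement (and its arguments are not evaluated).
 */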

static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/* If DMA is unavailable, use the staging EP buffer */
	if (!req->req.dma) {

		/* For IN transfers, copy data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}

	/* Start DMA */
	req->active = true;
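	/*
	 * Note (inferred from the write sequence below, not from a
	 * datasheet): the TX size is written once on its own and then
	 * re-written with the kick bit set, presumably so the size is
	 * latched before the single-stage transfer is triggered.
	 */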
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);

	/* In the absence of a request, bail out; it must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing the queue; the active
	 * request was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0!\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;
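	/*
	 * Illustrative note: per the usual USB convention, a short packet
	 * (less than maxpacket) terminates the transfer, e.g. a 200-byte
	 * packet on a 512-byte OUT endpoint completes the request even
	 * before req.length bytes have been received.
	 */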

	/* That's it? Complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means the descriptor list is empty to the HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
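	/*
	 * Worked example (illustrative): with AST_VHUB_DESCS_COUNT = 256,
	 * d_next = 10 and d_last = 5, this yields
	 * (5 + 256 - 10 - 1) & 255 = 250 free slots. The mask arithmetic
	 * relies on AST_VHUB_DESCS_COUNT being a power of two.
	 */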
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}

static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet? Because a descriptor can
			 * hold up to 8 packets we can't just compare "chunk"
			 * with ep.maxpacket. We have to check whether it is
			 * a multiple of it to know if we have to send a zero
			 * packet. Sadly that involves a modulo which is a
			 * bit expensive but probably still better than not
			 * doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}
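		/*
		 * Worked example (illustrative): with maxpacket = 512,
		 * chunk_max = 3584 and req.zero set, a final 1024-byte
		 * chunk is an exact multiple of maxpacket, so last_desc
		 * stays -1 and the next loop iteration queues a
		 * zero-length descriptor to terminate the transfer.
		 */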

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	if (likely(desc))
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while (stat != stat1);

	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}

static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to an 8-byte boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host sends a packet that is too long).
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
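	/*
	 * Example (illustrative): a 100-byte request on a 64-byte OUT
	 * endpoint fails the multiple-of-maxpacket test below, so it is
	 * left unmapped (u_req->dma == 0) and bounced through the staging
	 * buffer one packet at a time by ast_vhub_epn_kick().
	 */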
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
					    ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else {
		u_req->dma = 0;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	if (ep->epn.desc_mode)
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptor.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req = NULL, *iter;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != u_req)
			continue;
		req = iter;
		break;
	}

	if (req) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       req, req->active);
		if (req->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, req, -ECONNRESET);
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}

void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
	u32 reg;

	if (WARN_ON(ep->d_idx == 0))
		return;
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.stalled || ep->epn.wedged)
		reg |= VHUB_EP_CFG_STALL_CTRL;
	else
		reg &= ~VHUB_EP_CFG_STALL_CTRL;
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

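	/*
	 * Note: clearing a halt must also reset the endpoint's data toggle
	 * to DATA0 (USB 2.0 ClearFeature(ENDPOINT_HALT) semantics), which
	 * is what the EP_TOGGLE write below does.
	 */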
	if (!ep->epn.stalled && !ep->epn.wedged)
		writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
				       bool wedge)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;

	if (!u_ep || !u_ep->desc)
		return -EINVAL;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

	if (ep->d_idx == 0)
		return 0;
	if (ep->epn.is_iso)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling!\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable, d_idx=%d, dev=%p, type=%d, mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch!\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		       dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * The large-send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}
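	/*
	 * Worked example (illustrative): for a 512-byte bulk IN endpoint
	 * this computes 512 << 3 = 4096, then one subtraction brings it
	 * down to 3584 (7 packets), the largest multiple of maxpacket
	 * that fits in 4095 bytes.
	 */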

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark free */
	ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	d->epns[addr - 1] = ep;
	ep->epn.g_idx = i;
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
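	/* Generic EP register banks start at offset 0x200, 0x10 bytes apart */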

	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
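	/*
	 * Layout note: the single coherent allocation is split in two;
	 * the first AST_VHUB_EPn_MAX_PACKET bytes are the bounce buffer
	 * and the remaining 8 * AST_VHUB_DESCS_COUNT bytes hold the
	 * descriptor ring (8 bytes per descriptor: w0 + w1).
	 */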

	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}