cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtu3_gadget.c (17130B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
      4 *
      5 * Copyright (C) 2016 MediaTek Inc.
      6 *
      7 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
      8 */
      9
     10#include "mtu3.h"
     11#include "mtu3_trace.h"
     12
     13void mtu3_req_complete(struct mtu3_ep *mep,
     14		     struct usb_request *req, int status)
     15__releases(mep->mtu->lock)
     16__acquires(mep->mtu->lock)
     17{
     18	struct mtu3_request *mreq = to_mtu3_request(req);
     19	struct mtu3 *mtu = mreq->mtu;
     20
     21	list_del(&mreq->list);
     22	if (req->status == -EINPROGRESS)
     23		req->status = status;
     24
     25	trace_mtu3_req_complete(mreq);
     26	spin_unlock(&mtu->lock);
     27
     28	/* ep0 makes use of PIO, needn't unmap it */
     29	if (mep->epnum)
     30		usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
     31
     32	dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n",
     33		mep->name, req, req->status, req->actual, req->length);
     34
     35	usb_gadget_giveback_request(&mep->ep, req);
     36	spin_lock(&mtu->lock);
     37}
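/*
 * Locking note: mtu->lock is dropped across usb_gadget_giveback_request()
 * because the function driver's ->complete() callback may call back into
 * this driver (for example to queue the next request).  ep0 uses PIO, so
 * only non-zero endpoints need usb_gadget_unmap_request().
 */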
     38
     39static void nuke(struct mtu3_ep *mep, const int status)
     40{
     41	struct mtu3_request *mreq = NULL;
     42
     43	if (list_empty(&mep->req_list))
     44		return;
     45
     46	dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);
     47
     48	/* exclude EP0 */
     49	if (mep->epnum)
     50		mtu3_qmu_flush(mep);
     51
     52	while (!list_empty(&mep->req_list)) {
     53		mreq = list_first_entry(&mep->req_list,
     54					struct mtu3_request, list);
     55		mtu3_req_complete(mep, &mreq->request, status);
     56	}
     57}
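/*
 * nuke() retires every request still on the endpoint's req_list with the
 * given status (-ESHUTDOWN on disable and disconnect), flushing the QMU
 * first for non-ep0 endpoints so the hardware no longer references the
 * GPDs being completed.
 */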
     58
     59static int mtu3_ep_enable(struct mtu3_ep *mep)
     60{
     61	const struct usb_endpoint_descriptor *desc;
     62	const struct usb_ss_ep_comp_descriptor *comp_desc;
     63	struct mtu3 *mtu = mep->mtu;
     64	u32 interval = 0;
     65	u32 mult = 0;
     66	u32 burst = 0;
     67	int ret;
     68
     69	desc = mep->desc;
     70	comp_desc = mep->comp_desc;
     71	mep->type = usb_endpoint_type(desc);
     72	mep->maxp = usb_endpoint_maxp(desc);
     73
     74	switch (mtu->g.speed) {
     75	case USB_SPEED_SUPER:
     76	case USB_SPEED_SUPER_PLUS:
     77		if (usb_endpoint_xfer_int(desc) ||
     78				usb_endpoint_xfer_isoc(desc)) {
     79			interval = desc->bInterval;
     80			interval = clamp_val(interval, 1, 16);
     81			if (usb_endpoint_xfer_isoc(desc) && comp_desc)
     82				mult = comp_desc->bmAttributes;
     83		}
     84		if (comp_desc)
     85			burst = comp_desc->bMaxBurst;
     86
     87		break;
     88	case USB_SPEED_HIGH:
     89		if (usb_endpoint_xfer_isoc(desc) ||
     90				usb_endpoint_xfer_int(desc)) {
     91			interval = desc->bInterval;
     92			interval = clamp_val(interval, 1, 16);
     93			mult = usb_endpoint_maxp_mult(desc) - 1;
     94		}
     95		break;
     96	case USB_SPEED_FULL:
     97		if (usb_endpoint_xfer_isoc(desc))
     98			interval = clamp_val(desc->bInterval, 1, 16);
     99		else if (usb_endpoint_xfer_int(desc))
    100			interval = clamp_val(desc->bInterval, 1, 255);
    101
    102		break;
    103	default:
    104		break; /*others are ignored */
    105	}
    106
    107	dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
    108		__func__, mep->maxp, interval, burst, mult);
    109
    110	mep->ep.maxpacket = mep->maxp;
    111	mep->ep.desc = desc;
    112	mep->ep.comp_desc = comp_desc;
    113
    114	/* slot mainly affects bulk/isoc transfer, so ignore int */
    115	mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;
    116
    117	ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
    118	if (ret < 0)
    119		return ret;
    120
    121	ret = mtu3_gpd_ring_alloc(mep);
    122	if (ret < 0) {
    123		mtu3_deconfig_ep(mtu, mep);
    124		return ret;
    125	}
    126
    127	mtu3_qmu_start(mep);
    128
    129	return 0;
    130}
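/*
 * The interval/burst/mult programmed above are derived from the descriptors
 * according to the negotiated speed: SuperSpeed(+) takes bMaxBurst and, for
 * isoc, the companion mult; high speed takes the multiplier packed into
 * wMaxPacketSize; full speed only uses bInterval.  The endpoint is then
 * configured, its GPD ring allocated (undone again on failure) and the QMU
 * started.  Purely as an illustration (these values are not taken from this
 * driver), a SuperSpeed bulk IN endpoint handed in by a function driver
 * could be described as follows, where bMaxBurst = 15 requests bursts of
 * 16 packets:
 *
 *	static struct usb_endpoint_descriptor ss_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(1024),
 *	};
 *
 *	static struct usb_ss_ep_comp_descriptor ss_in_comp_desc = {
 *		.bLength		= sizeof(ss_in_comp_desc),
 *		.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
 *		.bMaxBurst		= 15,
 *	};
 */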
    131
    132static int mtu3_ep_disable(struct mtu3_ep *mep)
    133{
    134	struct mtu3 *mtu = mep->mtu;
    135
    136	mtu3_qmu_stop(mep);
    137
    138	/* abort all pending requests */
    139	nuke(mep, -ESHUTDOWN);
    140	mtu3_deconfig_ep(mtu, mep);
    141	mtu3_gpd_ring_free(mep);
    142
    143	mep->desc = NULL;
    144	mep->ep.desc = NULL;
    145	mep->comp_desc = NULL;
    146	mep->type = 0;
    147	mep->flags = 0;
    148
    149	return 0;
    150}
    151
    152static int mtu3_gadget_ep_enable(struct usb_ep *ep,
    153		const struct usb_endpoint_descriptor *desc)
    154{
    155	struct mtu3_ep *mep;
    156	struct mtu3 *mtu;
    157	unsigned long flags;
    158	int ret = -EINVAL;
    159
    160	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
    161		pr_debug("%s invalid parameters\n", __func__);
    162		return -EINVAL;
    163	}
    164
    165	if (!desc->wMaxPacketSize) {
    166		pr_debug("%s missing wMaxPacketSize\n", __func__);
    167		return -EINVAL;
    168	}
    169	mep = to_mtu3_ep(ep);
    170	mtu = mep->mtu;
    171
    172	/* check ep number and direction against endpoint */
    173	if (usb_endpoint_num(desc) != mep->epnum)
    174		return -EINVAL;
    175
    176	if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
    177		return -EINVAL;
    178
    179	dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);
    180
    181	if (mep->flags & MTU3_EP_ENABLED) {
    182		dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
    183				mep->name);
    184		return 0;
    185	}
    186
    187	spin_lock_irqsave(&mtu->lock, flags);
    188	mep->desc = desc;
    189	mep->comp_desc = ep->comp_desc;
    190
    191	ret = mtu3_ep_enable(mep);
    192	if (ret)
    193		goto error;
    194
    195	mep->flags = MTU3_EP_ENABLED;
    196	mtu->active_ep++;
    197
    198error:
    199	spin_unlock_irqrestore(&mtu->lock, flags);
    200
    201	dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
    202	trace_mtu3_gadget_ep_enable(mep);
    203
    204	return ret;
    205}
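/*
 * Before the hardware is touched, the descriptor is checked against this
 * particular hardware endpoint: wMaxPacketSize must be set and the endpoint
 * number and direction must match.  A function driver normally gets here
 * through the gadget core, roughly as in the sketch below (not code from
 * this driver; "f" is the usb_function and "ep" came from
 * usb_ep_autoconfig()):
 *
 *	ret = config_ep_by_speed(gadget, f, ep);
 *	if (!ret)
 *		ret = usb_ep_enable(ep);
 *
 * usb_ep_enable() is what ends up in mtu3_gadget_ep_enable().
 */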
    206
    207static int mtu3_gadget_ep_disable(struct usb_ep *ep)
    208{
    209	struct mtu3_ep *mep = to_mtu3_ep(ep);
    210	struct mtu3 *mtu = mep->mtu;
    211	unsigned long flags;
    212
    213	dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
    214	trace_mtu3_gadget_ep_disable(mep);
    215
    216	if (!(mep->flags & MTU3_EP_ENABLED)) {
    217		dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
    218		return 0;
    219	}
    220
    221	spin_lock_irqsave(&mtu->lock, flags);
    222	mtu3_ep_disable(mep);
    223	mep->flags = 0;
    224	mtu->active_ep--;
     225	spin_unlock_irqrestore(&mtu->lock, flags);

    226
    227	dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
    228		__func__, mtu->active_ep, mtu->is_active);
    229
    230	return 0;
    231}
    232
    233struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
    234{
    235	struct mtu3_ep *mep = to_mtu3_ep(ep);
    236	struct mtu3_request *mreq;
    237
    238	mreq = kzalloc(sizeof(*mreq), gfp_flags);
    239	if (!mreq)
    240		return NULL;
    241
    242	mreq->request.dma = DMA_ADDR_INVALID;
    243	mreq->epnum = mep->epnum;
    244	mreq->mep = mep;
    245	INIT_LIST_HEAD(&mreq->list);
    246	trace_mtu3_alloc_request(mreq);
    247
    248	return &mreq->request;
    249}
    250
    251void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
    252{
    253	struct mtu3_request *mreq = to_mtu3_request(req);
    254
    255	trace_mtu3_free_request(mreq);
    256	kfree(mreq);
    257}
    258
    259static int mtu3_gadget_queue(struct usb_ep *ep,
    260		struct usb_request *req, gfp_t gfp_flags)
    261{
    262	struct mtu3_ep *mep = to_mtu3_ep(ep);
    263	struct mtu3_request *mreq = to_mtu3_request(req);
    264	struct mtu3 *mtu = mep->mtu;
    265	unsigned long flags;
    266	int ret = 0;
    267
    268	if (!req->buf)
    269		return -ENODATA;
    270
    271	if (mreq->mep != mep)
    272		return -EINVAL;
    273
    274	dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
    275		__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
    276		mreq, ep->maxpacket, mreq->request.length);
    277
    278	if (req->length > GPD_BUF_SIZE ||
    279	    (mtu->gen2cp && req->length > GPD_BUF_SIZE_EL)) {
    280		dev_warn(mtu->dev,
    281			"req length > supported MAX:%d requested:%d\n",
    282			mtu->gen2cp ? GPD_BUF_SIZE_EL : GPD_BUF_SIZE,
    283			req->length);
    284		return -EOPNOTSUPP;
    285	}
    286
    287	/* don't queue if the ep is down */
    288	if (!mep->desc) {
    289		dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
    290			req, ep->name);
    291		return -ESHUTDOWN;
    292	}
    293
    294	mreq->mtu = mtu;
    295	mreq->request.actual = 0;
    296	mreq->request.status = -EINPROGRESS;
    297
    298	ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
    299	if (ret) {
    300		dev_err(mtu->dev, "dma mapping failed\n");
    301		return ret;
    302	}
    303
    304	spin_lock_irqsave(&mtu->lock, flags);
    305
    306	if (mtu3_prepare_transfer(mep)) {
    307		ret = -EAGAIN;
    308		goto error;
    309	}
    310
    311	list_add_tail(&mreq->list, &mep->req_list);
    312	mtu3_insert_gpd(mep, mreq);
    313	mtu3_qmu_resume(mep);
    314
    315error:
    316	spin_unlock_irqrestore(&mtu->lock, flags);
    317	trace_mtu3_gadget_queue(mreq);
    318
    319	return ret;
    320}
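/*
 * Queuing validates the request (a buffer must be present, the request must
 * belong to this endpoint, its length must fit in a single GPD buffer and
 * the endpoint must still be enabled), DMA-maps the buffer before taking
 * mtu->lock, then links the request into req_list, inserts a GPD and kicks
 * the QMU.  On the function-driver side this is reached roughly as below
 * (names here are placeholders):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * my_complete() is later invoked from mtu3_req_complete() above, via
 * usb_gadget_giveback_request(), with mtu->lock released.
 */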
    321
    322static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
    323{
    324	struct mtu3_ep *mep = to_mtu3_ep(ep);
    325	struct mtu3_request *mreq = to_mtu3_request(req);
    326	struct mtu3_request *r;
    327	struct mtu3 *mtu = mep->mtu;
    328	unsigned long flags;
    329	int ret = 0;
    330
    331	if (mreq->mep != mep)
    332		return -EINVAL;
    333
    334	dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
    335	trace_mtu3_gadget_dequeue(mreq);
    336
    337	spin_lock_irqsave(&mtu->lock, flags);
    338
    339	list_for_each_entry(r, &mep->req_list, list) {
    340		if (r == mreq)
    341			break;
    342	}
    343	if (r != mreq) {
    344		dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
    345		ret = -EINVAL;
    346		goto done;
    347	}
    348
    349	mtu3_qmu_flush(mep);  /* REVISIT: set BPS ?? */
    350	mtu3_req_complete(mep, req, -ECONNRESET);
    351	mtu3_qmu_start(mep);
    352
    353done:
    354	spin_unlock_irqrestore(&mtu->lock, flags);
    355
    356	return ret;
    357}
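/*
 * Dequeue only succeeds for a request that is still on this endpoint's
 * req_list: the QMU is flushed first, the request is given back with
 * -ECONNRESET, and the QMU is started again afterwards.
 */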
    358
    359/*
    360 * Set or clear the halt bit of an EP.
    361 * A halted EP won't TX/RX any data but will queue requests.
    362 */
    363static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
    364{
    365	struct mtu3_ep *mep = to_mtu3_ep(ep);
    366	struct mtu3 *mtu = mep->mtu;
    367	struct mtu3_request *mreq;
    368	unsigned long flags;
    369	int ret = 0;
    370
    371	dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);
    372
    373	spin_lock_irqsave(&mtu->lock, flags);
    374
    375	if (mep->type == USB_ENDPOINT_XFER_ISOC) {
    376		ret = -EINVAL;
    377		goto done;
    378	}
    379
    380	mreq = next_request(mep);
    381	if (value) {
    382		/*
     383		 * If there is no request for the TX-EP, the QMU will not
     384		 * transfer data to the TX-FIFO, so there is no need to check
     385		 * here whether the TX-FIFO still holds bytes
    386		 */
    387		if (mreq) {
    388			dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
    389				ep->name);
    390			ret = -EAGAIN;
    391			goto done;
    392		}
    393	} else {
    394		mep->flags &= ~MTU3_EP_WEDGE;
    395	}
    396
    397	dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");
    398
    399	mtu3_ep_stall_set(mep, value);
    400
    401done:
    402	spin_unlock_irqrestore(&mtu->lock, flags);
    403	trace_mtu3_gadget_ep_set_halt(mep);
    404
    405	return ret;
    406}
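/*
 * Halting is refused for isochronous endpoints and, while a request is
 * still queued, fails with -EAGAIN; clearing the halt through this op also
 * clears the MTU3_EP_WEDGE flag set by mtu3_gadget_ep_set_wedge() below.
 */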
    407
     408/* Set the halt feature; subsequent clear-halt requests are ignored */
    409static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
    410{
    411	struct mtu3_ep *mep = to_mtu3_ep(ep);
    412
    413	mep->flags |= MTU3_EP_WEDGE;
    414
    415	return usb_ep_set_halt(ep);
    416}
    417
    418static const struct usb_ep_ops mtu3_ep_ops = {
    419	.enable = mtu3_gadget_ep_enable,
    420	.disable = mtu3_gadget_ep_disable,
    421	.alloc_request = mtu3_alloc_request,
    422	.free_request = mtu3_free_request,
    423	.queue = mtu3_gadget_queue,
    424	.dequeue = mtu3_gadget_dequeue,
    425	.set_halt = mtu3_gadget_ep_set_halt,
    426	.set_wedge = mtu3_gadget_ep_set_wedge,
    427};
    428
    429static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
    430{
    431	struct mtu3 *mtu = gadget_to_mtu3(gadget);
    432
    433	return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
    434}
    435
    436static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
    437{
    438	struct mtu3 *mtu = gadget_to_mtu3(gadget);
    439	unsigned long flags;
    440
    441	dev_dbg(mtu->dev, "%s\n", __func__);
    442
    443	/* remote wakeup feature is not enabled by host */
    444	if (!mtu->may_wakeup)
    445		return  -EOPNOTSUPP;
    446
    447	spin_lock_irqsave(&mtu->lock, flags);
    448	if (mtu->g.speed >= USB_SPEED_SUPER) {
    449		mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
    450	} else {
    451		mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
    452		spin_unlock_irqrestore(&mtu->lock, flags);
    453		usleep_range(10000, 11000);
    454		spin_lock_irqsave(&mtu->lock, flags);
    455		mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
    456	}
    457	spin_unlock_irqrestore(&mtu->lock, flags);
    458	return 0;
    459}
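/*
 * Remote wakeup is only attempted when the host has enabled it
 * (may_wakeup).  At SuperSpeed(+) it is signalled by forcing a link-state
 * exit (UX_EXIT); at high/full speed the RESUME bit is driven for roughly
 * 10 ms, with the lock dropped around the sleep, and then cleared again.
 */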
    460
    461static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
    462		int is_selfpowered)
    463{
    464	struct mtu3 *mtu = gadget_to_mtu3(gadget);
    465
    466	mtu->is_self_powered = !!is_selfpowered;
    467	return 0;
    468}
    469
    470static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
    471{
    472	struct mtu3 *mtu = gadget_to_mtu3(gadget);
    473	unsigned long flags;
    474
    475	dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
    476		is_on ? "on" : "off", mtu->is_active ? "" : "in");
    477
    478	pm_runtime_get_sync(mtu->dev);
    479
    480	/* we'd rather not pullup unless the device is active. */
    481	spin_lock_irqsave(&mtu->lock, flags);
    482
    483	is_on = !!is_on;
    484	if (!mtu->is_active) {
    485		/* save it for mtu3_start() to process the request */
    486		mtu->softconnect = is_on;
    487	} else if (is_on != mtu->softconnect) {
    488		mtu->softconnect = is_on;
    489		mtu3_dev_on_off(mtu, is_on);
    490	}
    491
    492	spin_unlock_irqrestore(&mtu->lock, flags);
    493	pm_runtime_put(mtu->dev);
    494
    495	return 0;
    496}
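/*
 * The requested softconnect state is always recorded; the actual connect
 * or disconnect (mtu3_dev_on_off()) only happens immediately when the
 * controller is active, otherwise mtu3_start() applies the cached value
 * later.
 */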
    497
    498static int mtu3_gadget_start(struct usb_gadget *gadget,
    499		struct usb_gadget_driver *driver)
    500{
    501	struct mtu3 *mtu = gadget_to_mtu3(gadget);
    502	unsigned long flags;
    503
    504	if (mtu->gadget_driver) {
    505		dev_err(mtu->dev, "%s is already bound to %s\n",
    506			mtu->g.name, mtu->gadget_driver->driver.name);
    507		return -EBUSY;
    508	}
    509
    510	dev_dbg(mtu->dev, "bind driver %s\n", driver->function);
    511	pm_runtime_get_sync(mtu->dev);
    512
    513	spin_lock_irqsave(&mtu->lock, flags);
    514
    515	mtu->softconnect = 0;
    516	mtu->gadget_driver = driver;
    517
    518	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
    519		mtu3_start(mtu);
    520
    521	spin_unlock_irqrestore(&mtu->lock, flags);
    522	pm_runtime_put(mtu->dev);
    523
    524	return 0;
    525}
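/*
 * udc_start: called by the UDC core once a gadget/composite driver binds.
 * softconnect starts out cleared, so the device stays soft-disconnected
 * until the gadget driver calls usb_gadget_connect(); the controller itself
 * is only started here in pure peripheral mode, dual-role configurations
 * presumably start it from the role-switch path instead.
 */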
    526
    527static void stop_activity(struct mtu3 *mtu)
    528{
    529	struct usb_gadget_driver *driver = mtu->gadget_driver;
    530	int i;
    531
    532	/* don't disconnect if it's not connected */
    533	if (mtu->g.speed == USB_SPEED_UNKNOWN)
    534		driver = NULL;
    535	else
    536		mtu->g.speed = USB_SPEED_UNKNOWN;
    537
    538	/* deactivate the hardware */
    539	if (mtu->softconnect) {
    540		mtu->softconnect = 0;
    541		mtu3_dev_on_off(mtu, 0);
    542	}
    543
    544	/*
    545	 * killing any outstanding requests will quiesce the driver;
    546	 * then report disconnect
    547	 */
    548	nuke(mtu->ep0, -ESHUTDOWN);
    549	for (i = 1; i < mtu->num_eps; i++) {
    550		nuke(mtu->in_eps + i, -ESHUTDOWN);
    551		nuke(mtu->out_eps + i, -ESHUTDOWN);
    552	}
    553
    554	if (driver) {
    555		spin_unlock(&mtu->lock);
    556		driver->disconnect(&mtu->g);
    557		spin_lock(&mtu->lock);
    558	}
    559}
    560
    561static int mtu3_gadget_stop(struct usb_gadget *g)
    562{
    563	struct mtu3 *mtu = gadget_to_mtu3(g);
    564	unsigned long flags;
    565
    566	dev_dbg(mtu->dev, "%s\n", __func__);
    567
    568	spin_lock_irqsave(&mtu->lock, flags);
    569
    570	stop_activity(mtu);
    571	mtu->gadget_driver = NULL;
    572
    573	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
    574		mtu3_stop(mtu);
    575
    576	spin_unlock_irqrestore(&mtu->lock, flags);
    577
    578	synchronize_irq(mtu->irq);
    579	return 0;
    580}
    581
    582static void
    583mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
    584{
    585	struct mtu3 *mtu = gadget_to_mtu3(g);
    586	unsigned long flags;
    587
    588	dev_dbg(mtu->dev, "%s %s\n", __func__, usb_speed_string(speed));
    589
    590	spin_lock_irqsave(&mtu->lock, flags);
    591	mtu->speed = speed;
    592	spin_unlock_irqrestore(&mtu->lock, flags);
    593}
    594
    595static const struct usb_gadget_ops mtu3_gadget_ops = {
    596	.get_frame = mtu3_gadget_get_frame,
    597	.wakeup = mtu3_gadget_wakeup,
    598	.set_selfpowered = mtu3_gadget_set_self_powered,
    599	.pullup = mtu3_gadget_pullup,
    600	.udc_start = mtu3_gadget_start,
    601	.udc_stop = mtu3_gadget_stop,
    602	.udc_set_speed = mtu3_gadget_set_speed,
    603};
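/*
 * Mapping to the gadget API: usb_gadget_connect()/usb_gadget_disconnect()
 * end up in .pullup, usb_gadget_wakeup() in .wakeup,
 * usb_gadget_frame_number() in .get_frame, and binding/unbinding a gadget
 * driver goes through .udc_start/.udc_stop.
 */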
    604
    605static void mtu3_state_reset(struct mtu3 *mtu)
    606{
    607	mtu->address = 0;
    608	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
    609	mtu->may_wakeup = 0;
    610	mtu->u1_enable = 0;
    611	mtu->u2_enable = 0;
    612	mtu->delayed_status = false;
    613	mtu->test_mode = false;
    614}
    615
    616static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
    617		u32 epnum, u32 is_in)
    618{
    619	mep->epnum = epnum;
    620	mep->mtu = mtu;
    621	mep->is_in = is_in;
    622
    623	INIT_LIST_HEAD(&mep->req_list);
    624
    625	sprintf(mep->name, "ep%d%s", epnum,
    626		!epnum ? "" : (is_in ? "in" : "out"));
    627
    628	mep->ep.name = mep->name;
    629	INIT_LIST_HEAD(&mep->ep.ep_list);
    630
    631	/* initialize maxpacket as SS */
    632	if (!epnum) {
    633		usb_ep_set_maxpacket_limit(&mep->ep, 512);
    634		mep->ep.caps.type_control = true;
    635		mep->ep.ops = &mtu3_ep0_ops;
    636		mtu->g.ep0 = &mep->ep;
    637	} else {
    638		usb_ep_set_maxpacket_limit(&mep->ep, 1024);
    639		mep->ep.caps.type_iso = true;
    640		mep->ep.caps.type_bulk = true;
    641		mep->ep.caps.type_int = true;
    642		mep->ep.ops = &mtu3_ep_ops;
    643		list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
    644	}
    645
    646	dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
    647		 mep->ep.maxpacket);
    648
    649	if (!epnum) {
    650		mep->ep.caps.dir_in = true;
    651		mep->ep.caps.dir_out = true;
    652	} else if (is_in) {
    653		mep->ep.caps.dir_in = true;
    654	} else {
    655		mep->ep.caps.dir_out = true;
    656	}
    657}
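/*
 * ep0 is the bidirectional control endpoint: maxpacket limited to 512 (the
 * SuperSpeed control maximum), served by mtu3_ep0_ops and exported as
 * mtu->g.ep0.  Every other hardware endpoint gets a 1024-byte limit, may
 * carry bulk, interrupt or isochronous traffic, and is put on the gadget's
 * ep_list so usb_ep_autoconfig() can hand it out.
 */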
    658
    659static void mtu3_gadget_init_eps(struct mtu3 *mtu)
    660{
    661	u8 epnum;
    662
    663	/* initialize endpoint list just once */
    664	INIT_LIST_HEAD(&(mtu->g.ep_list));
    665
    666	dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
    667		__func__, mtu->num_eps);
    668
    669	init_hw_ep(mtu, mtu->ep0, 0, 0);
    670	for (epnum = 1; epnum < mtu->num_eps; epnum++) {
    671		init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
    672		init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
    673	}
    674}
    675
    676int mtu3_gadget_setup(struct mtu3 *mtu)
    677{
    678	mtu->g.ops = &mtu3_gadget_ops;
    679	mtu->g.max_speed = mtu->max_speed;
    680	mtu->g.speed = USB_SPEED_UNKNOWN;
    681	mtu->g.sg_supported = 0;
    682	mtu->g.name = MTU3_DRIVER_NAME;
    683	mtu->is_active = 0;
    684	mtu->delayed_status = false;
    685
    686	mtu3_gadget_init_eps(mtu);
    687
    688	return usb_add_gadget_udc(mtu->dev, &mtu->g);
    689}
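/*
 * mtu3_gadget_setup() fills in the usb_gadget, builds the endpoint list and
 * registers the UDC with the core via usb_add_gadget_udc(); after that a
 * function driver can bind to it (for instance through configfs), which
 * lands in mtu3_gadget_start() above.
 */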
    690
    691void mtu3_gadget_cleanup(struct mtu3 *mtu)
    692{
    693	usb_del_gadget_udc(&mtu->g);
    694}
    695
    696void mtu3_gadget_resume(struct mtu3 *mtu)
    697{
    698	dev_dbg(mtu->dev, "gadget RESUME\n");
    699	if (mtu->gadget_driver && mtu->gadget_driver->resume) {
    700		spin_unlock(&mtu->lock);
    701		mtu->gadget_driver->resume(&mtu->g);
    702		spin_lock(&mtu->lock);
    703	}
    704}
    705
     706/* called when SOF packets stop for 3+ msec or the link enters U3 */
    707void mtu3_gadget_suspend(struct mtu3 *mtu)
    708{
    709	dev_dbg(mtu->dev, "gadget SUSPEND\n");
    710	if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
    711		spin_unlock(&mtu->lock);
    712		mtu->gadget_driver->suspend(&mtu->g);
    713		spin_lock(&mtu->lock);
    714	}
    715}
    716
    717/* called when VBUS drops below session threshold, and in other cases */
    718void mtu3_gadget_disconnect(struct mtu3 *mtu)
    719{
    720	dev_dbg(mtu->dev, "gadget DISCONNECT\n");
    721	if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
    722		spin_unlock(&mtu->lock);
    723		mtu->gadget_driver->disconnect(&mtu->g);
    724		spin_lock(&mtu->lock);
    725	}
    726
    727	mtu3_state_reset(mtu);
    728	usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
    729}
    730
    731void mtu3_gadget_reset(struct mtu3 *mtu)
    732{
    733	dev_dbg(mtu->dev, "gadget RESET\n");
    734
    735	/* report disconnect, if we didn't flush EP state */
    736	if (mtu->g.speed != USB_SPEED_UNKNOWN)
    737		mtu3_gadget_disconnect(mtu);
    738	else
    739		mtu3_state_reset(mtu);
    740}
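/*
 * On a bus reset: if a connection had been established (g.speed is known),
 * a disconnect is reported first so the function driver tears down its
 * endpoint state; otherwise only the internal device state is
 * re-initialized.
 */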