cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xhci-mtk-sch.c (19216B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author:
 *  Zhigang.Wei <zhigang.wei@mediatek.com>
 *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "xhci.h"
#include "xhci-mtk.h"

#define SSP_BW_BOUNDARY	130000
#define SS_BW_BOUNDARY	51000
/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
#define HS_BW_BOUNDARY	6144
/* usb2 spec section 11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188

#define DBG_BUF_EN	64

/* schedule error type */
#define ESCH_SS_Y6		1001
#define ESCH_SS_OVERLAP		1002
#define ESCH_CS_OVERFLOW	1003
#define ESCH_BW_OVERFLOW	1004
#define ESCH_FIXME		1005

/* mtk scheduler bitmasks */
#define EP_BPKTS(p)	((p) & 0x7f)
#define EP_BCSCOUNT(p)	(((p) & 0x7) << 8)
#define EP_BBM(p)	((p) << 11)
#define EP_BOFFSET(p)	((p) & 0x3fff)
#define EP_BREPEAT(p)	(((p) & 0x7fff) << 16)

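/*
 * Illustrative packing example (values chosen for illustration only):
 * these fields end up in the two vendor-defined dwords of the endpoint
 * context in xhci_mtk_check_bandwidth() below, e.g. pkts = 3,
 * cs_count = 2, burst_mode = 1 gives
 *   reserved[0] = EP_BPKTS(3) | EP_BCSCOUNT(2) | EP_BBM(1)
 *               = 0x3 | (0x2 << 8) | (0x1 << 11) = 0xa03.
 */
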
static char *sch_error_string(int err_num)
{
	switch (err_num) {
	case ESCH_SS_Y6:
		return "Can't schedule Start-Split in Y6";
	case ESCH_SS_OVERLAP:
		return "Can't find a suitable Start-Split location";
	case ESCH_CS_OVERFLOW:
		return "The last Complete-Split is greater than 7";
	case ESCH_BW_OVERFLOW:
		return "Bandwidth exceeds the maximum limit";
	case ESCH_FIXME:
		return "FIXME, to be resolved";
	default:
		return "Unknown";
	}
}

static int is_fs_or_ls(enum usb_device_speed speed)
{
	return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
}

static const char *
decode_ep(struct usb_host_endpoint *ep, enum usb_device_speed speed)
{
	static char buf[DBG_BUF_EN];
	struct usb_endpoint_descriptor *epd = &ep->desc;
	unsigned int interval;
	const char *unit;

	interval = usb_decode_interval(epd, speed);
	if (interval % 1000) {
		unit = "us";
	} else {
		unit = "ms";
		interval /= 1000;
	}

	snprintf(buf, DBG_BUF_EN, "%s ep%d%s %s, mpkt:%d, interval:%d/%d%s",
		 usb_speed_string(speed), usb_endpoint_num(epd),
		 usb_endpoint_dir_in(epd) ? "in" : "out",
		 usb_ep_type_string(usb_endpoint_type(epd)),
		 usb_endpoint_maxp(epd), epd->bInterval, interval, unit);

	return buf;
}

static u32 get_bw_boundary(enum usb_device_speed speed)
{
	u32 boundary;

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		boundary = SSP_BW_BOUNDARY;
		break;
	case USB_SPEED_SUPER:
		boundary = SS_BW_BOUNDARY;
		break;
	default:
		boundary = HS_BW_BOUNDARY;
		break;
	}

	return boundary;
}

/*
 * Get the bandwidth domain which @ep belongs to.
 *
 * The bandwidth domain array is saved in @sch_array of struct xhci_hcd_mtk.
 * Each HS root port is treated as a single bandwidth domain, but each SS
 * root port is treated as two bandwidth domains, one for IN eps and one
 * for OUT eps.
 * The @real_port value is defined as follows according to the xHCI spec:
 * 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc,
 * so the bandwidth domain array is organized as follows for simplification:
 * SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
 */
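/*
 * For example, on a hypothetical controller with two SS and two HS root
 * ports: real_port 1 (SSport0) maps to index 0 (OUT) or 1 (IN), real_port 2
 * (SSport1) to index 2 or 3, real_port 3 (HSport0) to index 4, and
 * real_port 4 (HSport1) to index 5.
 */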
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
	    struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
	struct xhci_virt_device *virt_dev;
	int bw_index;

	virt_dev = xhci->devs[udev->slot_id];
	if (!virt_dev->real_port) {
		WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
		return NULL;
	}

	if (udev->speed >= USB_SPEED_SUPER) {
		if (usb_endpoint_dir_out(&ep->desc))
			bw_index = (virt_dev->real_port - 1) * 2;
		else
			bw_index = (virt_dev->real_port - 1) * 2 + 1;
	} else {
		/* add one more for each SS port */
		bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
	}

	return &mtk->sch_array[bw_index];
}

static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
{
	u32 esit;

	esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
	if (esit > XHCI_MTK_MAX_ESIT)
		esit = XHCI_MTK_MAX_ESIT;

	return esit;
}

static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct mu3h_sch_tt *tt, **tt_index, **ptt;
	bool allocated_index = false;

	if (!utt)
		return NULL;	/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {	/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					sizeof(*tt_index), GFP_KERNEL);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		ptt = &tt_index[udev->ttport - 1];
	} else {
		ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {	/* Create the mu3h_sch_tt */
		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		INIT_LIST_HEAD(&tt->ep_list);
		*ptt = tt;
	}

	return tt;
}

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct mu3h_sch_tt *tt, **tt_index, **ptt;
	int i, cnt;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];
		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ep_list))
		return;		/* never allocated, or still in use */

	*ptt = NULL;
	kfree(tt);

	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
	      struct usb_host_endpoint *ep)
{
	struct mu3h_sch_ep_info *sch_ep;
	struct mu3h_sch_bw_info *bw_info;
	struct mu3h_sch_tt *tt = NULL;

	bw_info = get_bw_info(mtk, udev, ep);
	if (!bw_info)
		return ERR_PTR(-ENODEV);

	sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
	if (!sch_ep)
		return ERR_PTR(-ENOMEM);

	if (is_fs_or_ls(udev->speed)) {
		tt = find_tt(udev);
		if (IS_ERR(tt)) {
			kfree(sch_ep);
			return ERR_PTR(-ENOMEM);
		}
	}

	sch_ep->bw_info = bw_info;
	sch_ep->sch_tt = tt;
	sch_ep->ep = ep;
	sch_ep->speed = udev->speed;
	INIT_LIST_HEAD(&sch_ep->endpoint);
	INIT_LIST_HEAD(&sch_ep->tt_endpoint);
	INIT_HLIST_NODE(&sch_ep->hentry);

	return sch_ep;
}

static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
			   struct mu3h_sch_ep_info *sch_ep)
{
	u32 ep_type;
	u32 maxpkt;
	u32 max_burst;
	u32 mult;
	u32 esit_pkts;
	u32 max_esit_payload;

	ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
	maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
	mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
	max_esit_payload =
		(CTX_TO_MAX_ESIT_PAYLOAD_HI(
			le32_to_cpu(ep_ctx->ep_info)) << 16) |
		 CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));

	sch_ep->esit = get_esit(ep_ctx);
	sch_ep->num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
	sch_ep->ep_type = ep_type;
	sch_ep->maxpkt = maxpkt;
	sch_ep->offset = 0;
	sch_ep->burst_mode = 0;
	sch_ep->repeat = 0;

	if (sch_ep->speed == USB_SPEED_HIGH) {
		sch_ep->cs_count = 0;

		/*
		 * usb_20 spec section 5.9
		 * a single microframe is enough for HS synchronous endpoints
		 * in an interval
		 */
		sch_ep->num_budget_microframes = 1;

		/*
		 * xHCI spec section 6.2.3.4
		 * @max_burst is the number of additional transaction
		 * opportunities per microframe
		 */
		sch_ep->pkts = max_burst + 1;
		sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
	} else if (sch_ep->speed >= USB_SPEED_SUPER) {
		/* usb3_r1 spec section 4.4.7 & 4.4.8 */
		sch_ep->cs_count = 0;
		sch_ep->burst_mode = 1;
		/*
		 * some devices set (d)wBytesPerInterval to 0, which makes
		 * max_esit_payload 0, so evaluate esit_pkts from mult and
		 * burst instead
		 */
		esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
		if (esit_pkts == 0)
			esit_pkts = (mult + 1) * (max_burst + 1);

		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
			sch_ep->pkts = esit_pkts;
			sch_ep->num_budget_microframes = 1;
		}

		if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {

			if (sch_ep->esit == 1)
				sch_ep->pkts = esit_pkts;
			else if (esit_pkts <= sch_ep->esit)
				sch_ep->pkts = 1;
			else
				sch_ep->pkts = roundup_pow_of_two(esit_pkts)
					/ sch_ep->esit;

			sch_ep->num_budget_microframes =
				DIV_ROUND_UP(esit_pkts, sch_ep->pkts);

			sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
		}
		sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
	} else if (is_fs_or_ls(sch_ep->speed)) {
		sch_ep->pkts = 1; /* at most one packet for each microframe */

		/*
		 * num_budget_microframes and cs_count will be updated when
		 * checking the TT for INT_OUT_EP and ISOC/INT_IN_EP types
		 */
		sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
		sch_ep->num_budget_microframes = sch_ep->cs_count;
		sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
	}
}

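/*
 * Worked example for setup_sch_info() above (values illustrative only):
 * an HS isochronous endpoint with maxpkt = 1024 and max_burst = 2 gets
 * pkts = 3 and bw_cost_per_microframe = 3072 bytes, which is later
 * compared against HS_BW_BOUNDARY (6144) in check_sch_bw().
 */
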
/* Get maximum bandwidth when we schedule at offset slot. */
static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
	struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
	u32 max_bw = 0;
	u32 bw;
	int i, j, k;

	for (i = 0; i < sch_ep->num_esit; i++) {
		u32 base = offset + i * sch_ep->esit;

		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
			k = XHCI_MTK_BW_INDEX(base + j);
			bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
			if (bw > max_bw)
				max_bw = bw;
		}
	}
	return max_bw;
}

static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
	struct mu3h_sch_ep_info *sch_ep, bool used)
{
	int bw_updated;
	u32 base;
	int i, j;

	bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);

	for (i = 0; i < sch_ep->num_esit; i++) {
		base = sch_ep->offset + i * sch_ep->esit;
		for (j = 0; j < sch_ep->num_budget_microframes; j++)
			sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
	}
}

static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
{
	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
	u32 tmp;
	int base;
	int i, j, k;

	for (i = 0; i < sch_ep->num_esit; i++) {
		base = offset + i * sch_ep->esit;

		/*
		 * Compared with the HS bus, no matter what the ep type is,
		 * the hub always delays one uframe before sending the data
		 */
		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
			k = XHCI_MTK_BW_INDEX(base + j);
			tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
			if (tmp > FS_PAYLOAD_MAX)
				return -ESCH_BW_OVERFLOW;
		}
	}

	return 0;
}

static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
	u32 extra_cs_count;
	u32 start_ss, last_ss;
	u32 start_cs, last_cs;

	if (!sch_ep->sch_tt)
		return 0;

	start_ss = offset % 8;

	if (sch_ep->ep_type == ISOC_OUT_EP) {
		last_ss = start_ss + sch_ep->cs_count - 1;

		/*
		 * usb_20 spec section 11.18:
		 * must never schedule a Start-Split in Y6
		 */
		if (!(start_ss == 7 || last_ss < 6))
			return -ESCH_SS_Y6;

	} else {
		u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);

		/*
		 * usb_20 spec section 11.18:
		 * must never schedule a Start-Split in Y6
		 */
		if (start_ss == 6)
			return -ESCH_SS_Y6;

		/* one uframe for ss + one uframe for idle */
		start_cs = (start_ss + 2) % 8;
		last_cs = start_cs + cs_count - 1;

		if (last_cs > 7)
			return -ESCH_CS_OVERFLOW;

		if (sch_ep->ep_type == ISOC_IN_EP)
			extra_cs_count = (last_cs == 7) ? 1 : 2;
		else /* ep_type: INTR IN / INTR OUT */
			extra_cs_count = 1;

		cs_count += extra_cs_count;
		if (cs_count > 7)
			cs_count = 7; /* HW limit */

		sch_ep->cs_count = cs_count;
		/* one for ss, the other for idle */
		sch_ep->num_budget_microframes = cs_count + 2;

		/*
		 * if interval == 1 and maxp > 752, num_budget_microframes is
		 * larger than sch_ep->esit and would overstep the boundary
		 */
		if (sch_ep->num_budget_microframes > sch_ep->esit)
			sch_ep->num_budget_microframes = sch_ep->esit;
	}

	return check_fs_bus_bw(sch_ep, offset);
}

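/*
 * Worked example for check_sch_tt() above (values illustrative only):
 * a FS interrupt IN endpoint with maxpkt = 64 starts with cs_count = 1;
 * scheduled at offset 0, start_ss = 0, start_cs = 2, last_cs = 2 and
 * extra_cs_count = 1, so cs_count becomes 2 and num_budget_microframes
 * becomes 4 (one Start-Split, one idle uframe, two Complete-Splits).
 */
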
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{
	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
	int bw_updated;
	u32 base;
	int i, j;

	bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);

	for (i = 0; i < sch_ep->num_esit; i++) {
		base = sch_ep->offset + i * sch_ep->esit;

		for (j = 0; j < sch_ep->num_budget_microframes; j++)
			tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
	}

	if (used)
		list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
	else
		list_del(&sch_ep->tt_endpoint);
}

static int load_ep_bw(struct mu3h_sch_bw_info *sch_bw,
		      struct mu3h_sch_ep_info *sch_ep, bool loaded)
{
	if (sch_ep->sch_tt)
		update_sch_tt(sch_ep, loaded);

	/* update bus bandwidth info */
	update_bus_bw(sch_bw, sch_ep, loaded);
	sch_ep->allocated = loaded;

	return 0;
}

static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
{
	struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
	const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
	u32 offset;
	u32 worst_bw;
	u32 min_bw = ~0;
	int min_index = -1;
	int ret = 0;

	/*
	 * Search through all possible schedule microframes and find the
	 * microframe whose worst-case bandwidth is minimal.
	 */
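	/*
	 * For example (illustrative), an endpoint with esit = 8 tries
	 * offsets 0..7 and keeps the offset with the lowest worst-case
	 * bus load; LS/FS endpoints behind a TT take the first offset
	 * that fits instead.
	 */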
	for (offset = 0; offset < sch_ep->esit; offset++) {
		ret = check_sch_tt(sch_ep, offset);
		if (ret)
			continue;

		worst_bw = get_max_bw(sch_bw, sch_ep, offset);
		if (worst_bw > bw_boundary)
			continue;

		if (min_bw > worst_bw) {
			min_bw = worst_bw;
			min_index = offset;
		}

		/* use first-fit for LS/FS */
		if (sch_ep->sch_tt && min_index >= 0)
			break;

		if (min_bw == 0)
			break;
	}

	if (min_index < 0)
		return ret ? ret : -ESCH_BW_OVERFLOW;

	sch_ep->offset = min_index;

	return load_ep_bw(sch_bw, sch_ep, true);
}

static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
			   struct mu3h_sch_ep_info *sch_ep)
{
	/* only release bw for eps that passed the check in check_sch_bw() */
	if (sch_ep->allocated)
		load_ep_bw(sch_ep->bw_info, sch_ep, false);

	if (sch_ep->sch_tt)
		drop_tt(udev);

	list_del(&sch_ep->endpoint);
	hlist_del(&sch_ep->hentry);
	kfree(sch_ep);
}

static bool need_bw_sch(struct usb_device *udev,
			struct usb_host_endpoint *ep)
{
	bool has_tt = udev->tt && udev->tt->hub->parent;

	/* only for periodic endpoints */
	if (usb_endpoint_xfer_control(&ep->desc)
		|| usb_endpoint_xfer_bulk(&ep->desc))
		return false;

	/*
	 * LS & FS periodic endpoints whose device is not behind a TT are
	 * also ignored; the root hub schedules them directly, but the
	 * @bpkts field of the endpoint context needs to be set to 1.
	 */
	if (is_fs_or_ls(udev->speed) && !has_tt)
		return false;

	/* skip endpoint with zero maxpkt */
	if (usb_endpoint_maxp(&ep->desc) == 0)
		return false;

	return true;
}

int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
{
	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
	struct mu3h_sch_bw_info *sch_array;
	int num_usb_bus;

	/* SS IN and OUT are separate domains */
	num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;

	sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
	if (sch_array == NULL)
		return -ENOMEM;

	mtk->sch_array = sch_array;

	INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
	hash_init(mtk->sch_ep_hash);

	return 0;
}

void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk)
{
	kfree(mtk->sch_array);
}

static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
			struct usb_host_endpoint *ep)
{
	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_device *virt_dev;
	struct mu3h_sch_ep_info *sch_ep;
	unsigned int ep_index;

	virt_dev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	if (!need_bw_sch(udev, ep)) {
		/*
		 * set @bpkts to 1 if it is an LS or FS periodic endpoint
		 * whose device is not connected through an external HS hub
		 */
		if (usb_endpoint_xfer_int(&ep->desc)
			|| usb_endpoint_xfer_isoc(&ep->desc))
			ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));

		return 0;
	}

	xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));

	sch_ep = create_sch_ep(mtk, udev, ep);
	if (IS_ERR_OR_NULL(sch_ep))
		return -ENOMEM;

	setup_sch_info(ep_ctx, sch_ep);

	list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
	hash_add(mtk->sch_ep_hash, &sch_ep->hentry, (unsigned long)ep);

	return 0;
}

static void drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
			  struct usb_host_endpoint *ep)
{
	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct mu3h_sch_ep_info *sch_ep;
	struct hlist_node *hn;

	if (!need_bw_sch(udev, ep))
		return;

	xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));

	hash_for_each_possible_safe(mtk->sch_ep_hash, sch_ep,
				    hn, hentry, (unsigned long)ep) {
		if (sch_ep->ep == ep) {
			destroy_sch_ep(mtk, udev, sch_ep);
			break;
		}
	}
}

int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
	struct mu3h_sch_ep_info *sch_ep;
	int ret;

	xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));

	list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
		struct xhci_ep_ctx *ep_ctx;
		struct usb_host_endpoint *ep = sch_ep->ep;
		unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);

		ret = check_sch_bw(sch_ep);
		if (ret) {
			xhci_err(xhci, "Not enough bandwidth! (%s)\n",
				 sch_error_string(-ret));
			return -ENOSPC;
		}

		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
		ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
			| EP_BCSCOUNT(sch_ep->cs_count)
			| EP_BBM(sch_ep->burst_mode));
		ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
			| EP_BREPEAT(sch_ep->repeat));

		xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
			sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
			sch_ep->offset, sch_ep->repeat);
	}

	ret = xhci_check_bandwidth(hcd, udev);
	if (!ret)
		list_del_init(&mtk->bw_ep_chk_list);

	return ret;
}

void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct mu3h_sch_ep_info *sch_ep, *tmp;

	xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));

	list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint)
		destroy_sch_ep(mtk, udev, sch_ep);

	xhci_reset_bandwidth(hcd, udev);
}

int xhci_mtk_add_ep(struct usb_hcd *hcd, struct usb_device *udev,
		    struct usb_host_endpoint *ep)
{
	int ret;

	ret = xhci_add_endpoint(hcd, udev, ep);
	if (ret)
		return ret;

	if (ep->hcpriv)
		ret = add_ep_quirk(hcd, udev, ep);

	return ret;
}

int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
		     struct usb_host_endpoint *ep)
{
	int ret;

	ret = xhci_drop_endpoint(hcd, udev, ep);
	if (ret)
		return ret;

	if (ep->hcpriv)
		drop_ep_quirk(hcd, udev, ep);

	return 0;
}