cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

usb.c (25264B)


// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

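/* Issue a synchronous vendor control transfer. The caller must hold
 * usb.usb_ctrl_mtx; the request is retried up to MT_VEND_REQ_MAX_RETRY
 * times with a 5-10ms back-off, and -ENODEV marks the device removed.
 */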
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

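/* Map the register address space onto vendor request codes: the high
 * bits (MT_VEND_TYPE_MASK) select EEPROM, config or plain multi-read
 * access, and are stripped from the address before the request is sent.
 */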
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

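/* Read-modify-write under a single usb_ctrl_mtx critical section, so
 * the update is atomic with respect to the other register accessors.
 */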
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

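/* Walk the interface descriptor and record the bulk IN/OUT endpoint
 * numbers; exactly __MT_EP_IN_MAX IN and __MT_EP_OUT_MAX OUT endpoints
 * are expected, otherwise -EINVAL is returned.
 */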
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

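/* Populate the URB scatterlist with up to nsgs page fragments of
 * q->buf_size bytes each; returns the number of fragments allocated,
 * or -ENOMEM when not even the first allocation succeeds.
 */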
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

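/* Parse the 16-bit DMA length the hardware prepends to each frame and
 * validate it against the received buffer; returns the payload length
 * or -EINVAL. Drivers with MT_DRV_RX_DMA_HDR consume the header
 * themselves, so the raw value is returned unchecked.
 */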
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

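/* Build an skb around the received fragment: when frame plus
 * skb_shared_info fits in buf_size the buffer becomes the skb head
 * directly (fast path), otherwise a small head is copied out and the
 * remainder is attached as a paged fragment (slow path).
 */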
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

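/* Completion callback for rx URBs, invoked in interrupt context:
 * disconnect/unlink statuses are dropped silently, other errors are
 * logged rate-limited; completed URBs are queued and the rx worker is
 * kicked to process them.
 */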
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->usb.stat_work);
	}
}

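/* Work item that drains tx status reports from the device; it
 * re-queues itself while the radio is running and reports were
 * processed, and clears MT76_READING_STATS once done.
 */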
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

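/* MT7663 uses its own AC-to-LMAC queue mapping; all other chips go
 * through the generic mt76_ac_to_hwq() helper.
 */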
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->phy.q_tx[i] = dev->phy.q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		q->qid = i;

		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might queue skbs, but mt76u_tx_kick()
		 * will fail to submit the urb, so clean up those skbs
		 * manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);
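
/* Illustrative usage sketch from a hypothetical chip driver's probe
 * routine; foo_probe() and mdev's allocation are made up for this
 * example, only the mt76u_* helpers come from this file:
 *
 *	static int foo_probe(struct usb_interface *intf,
 *			     const struct usb_device_id *id)
 *	{
 *		struct mt76_dev *mdev = ...; // allocated by the driver
 *		int err;
 *
 *		err = mt76u_init(mdev, intf);   // register bus/queue ops
 *		if (err)
 *			return err;
 *
 *		err = mt76u_alloc_queues(mdev); // rx ring + tx queues
 *		if (err)
 *			goto err_deinit;
 *		...
 *	err_deinit:
 *		mt76u_queues_deinit(mdev);
 *		return err;
 *	}
 */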

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");