cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

htt_rx.c (126800B)


      1// SPDX-License-Identifier: ISC
      2/*
      3 * Copyright (c) 2005-2011 Atheros Communications Inc.
      4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
      5 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
      6 */
      7
      8#include "core.h"
      9#include "htc.h"
     10#include "htt.h"
     11#include "txrx.h"
     12#include "debug.h"
     13#include "trace.h"
     14#include "mac.h"
     15
     16#include <linux/log2.h>
     17#include <linux/bitfield.h>
     18
     19/* when under memory pressure rx ring refill may fail and needs a retry */
     20#define HTT_RX_RING_REFILL_RETRY_MS 50
     21
     22#define HTT_RX_RING_REFILL_RESCHED_MS 5
     23
     24/* shortcut to interpret a raw memory buffer as a rx descriptor */
     25#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
     26
     27static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
     28
     29static struct sk_buff *
     30ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
     31{
     32	struct ath10k_skb_rxcb *rxcb;
     33
     34	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
     35		if (rxcb->paddr == paddr)
     36			return ATH10K_RXCB_SKB(rxcb);
     37
     38	WARN_ON_ONCE(1);
     39	return NULL;
     40}
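        /* Note on the lookup above: the skb_table hash is only used when the
         * firmware delivers in-order rx indications (rx_ring.in_ord_rx).
         * Entries are added in __ath10k_htt_rx_ring_fill_n() and removed in
         * ath10k_htt_rx_pop_paddr() (or on ring teardown), keyed by the DMA
         * address that the firmware later reports back to the host.
         */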
     41
     42static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
     43{
     44	struct sk_buff *skb;
     45	struct ath10k_skb_rxcb *rxcb;
     46	struct hlist_node *n;
     47	int i;
     48
     49	if (htt->rx_ring.in_ord_rx) {
     50		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
     51			skb = ATH10K_RXCB_SKB(rxcb);
     52			dma_unmap_single(htt->ar->dev, rxcb->paddr,
     53					 skb->len + skb_tailroom(skb),
     54					 DMA_FROM_DEVICE);
     55			hash_del(&rxcb->hlist);
     56			dev_kfree_skb_any(skb);
     57		}
     58	} else {
     59		for (i = 0; i < htt->rx_ring.size; i++) {
     60			skb = htt->rx_ring.netbufs_ring[i];
     61			if (!skb)
     62				continue;
     63
     64			rxcb = ATH10K_SKB_RXCB(skb);
     65			dma_unmap_single(htt->ar->dev, rxcb->paddr,
     66					 skb->len + skb_tailroom(skb),
     67					 DMA_FROM_DEVICE);
     68			dev_kfree_skb_any(skb);
     69		}
     70	}
     71
     72	htt->rx_ring.fill_cnt = 0;
     73	hash_init(htt->rx_ring.skb_table);
     74	memset(htt->rx_ring.netbufs_ring, 0,
     75	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
     76}
     77
     78static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
     79{
     80	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
     81}
     82
     83static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
     84{
     85	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
     86}
     87
     88static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
     89					     void *vaddr)
     90{
     91	htt->rx_ring.paddrs_ring_32 = vaddr;
     92}
     93
     94static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
     95					     void *vaddr)
     96{
     97	htt->rx_ring.paddrs_ring_64 = vaddr;
     98}
     99
    100static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
    101					  dma_addr_t paddr, int idx)
    102{
    103	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
    104}
    105
    106static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
    107					  dma_addr_t paddr, int idx)
    108{
    109	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
    110}
    111
    112static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
    113{
    114	htt->rx_ring.paddrs_ring_32[idx] = 0;
    115}
    116
    117static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
    118{
    119	htt->rx_ring.paddrs_ring_64[idx] = 0;
    120}
    121
    122static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
    123{
    124	return (void *)htt->rx_ring.paddrs_ring_32;
    125}
    126
    127static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
    128{
    129	return (void *)htt->rx_ring.paddrs_ring_64;
    130}
    131
    132static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
    133{
    134	struct ath10k_hw_params *hw = &htt->ar->hw_params;
    135	struct htt_rx_desc *rx_desc;
    136	struct ath10k_skb_rxcb *rxcb;
    137	struct sk_buff *skb;
    138	dma_addr_t paddr;
    139	int ret = 0, idx;
    140
    141	/* The Full Rx Reorder firmware has no way of telling the host
     142	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
     143	 * To keep things simple, make sure the ring is always half empty. This
     144	 * guarantees that no replenishment overruns are possible.
    145	 */
    146	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
    147
    148	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
    149
    150	if (idx < 0 || idx >= htt->rx_ring.size) {
    151		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
    152		idx &= htt->rx_ring.size_mask;
    153		ret = -ENOMEM;
    154		goto fail;
    155	}
    156
    157	while (num > 0) {
    158		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
    159		if (!skb) {
    160			ret = -ENOMEM;
    161			goto fail;
    162		}
    163
    164		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
    165			skb_pull(skb,
    166				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
    167				 skb->data);
    168
    169		/* Clear rx_desc attention word before posting to Rx ring */
    170		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
    171		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
    172
    173		paddr = dma_map_single(htt->ar->dev, skb->data,
    174				       skb->len + skb_tailroom(skb),
    175				       DMA_FROM_DEVICE);
    176
    177		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
    178			dev_kfree_skb_any(skb);
    179			ret = -ENOMEM;
    180			goto fail;
    181		}
    182
    183		rxcb = ATH10K_SKB_RXCB(skb);
    184		rxcb->paddr = paddr;
    185		htt->rx_ring.netbufs_ring[idx] = skb;
    186		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
    187		htt->rx_ring.fill_cnt++;
    188
    189		if (htt->rx_ring.in_ord_rx) {
    190			hash_add(htt->rx_ring.skb_table,
    191				 &ATH10K_SKB_RXCB(skb)->hlist,
    192				 paddr);
    193		}
    194
    195		num--;
    196		idx++;
    197		idx &= htt->rx_ring.size_mask;
    198	}
    199
    200fail:
    201	/*
    202	 * Make sure the rx buffer is updated before available buffer
    203	 * index to avoid any potential rx ring corruption.
    204	 */
    205	mb();
    206	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
    207	return ret;
    208}
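        /* A minimal sketch of the index arithmetic used above: the ring size
         * is validated as a power of two in ath10k_htt_rx_alloc(), so
         * advancing and wrapping the producer index is a mask rather than a
         * modulo, e.g. with size = 512 (size_mask = 511):
         *
         *	idx = 511;
         *	idx++;				// 512
         *	idx &= htt->rx_ring.size_mask;	// wraps back to 0
         *
         * The mb() before publishing alloc_idx ensures the buffer and paddr
         * ring writes are visible before the device sees the new index.
         */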
    209
    210static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
    211{
    212	lockdep_assert_held(&htt->rx_ring.lock);
    213	return __ath10k_htt_rx_ring_fill_n(htt, num);
    214}
    215
    216static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
    217{
    218	int ret, num_deficit, num_to_fill;
    219
    220	/* Refilling the whole RX ring buffer proves to be a bad idea. The
      221	 * reason is that RX may take up a significant amount of CPU cycles and
      222	 * starve other tasks, e.g. TX on an ethernet device while acting as a
      223	 * bridge with the ath10k wlan interface. This ended up with very poor
      224	 * performance once the CPU of the host system was overwhelmed with RX
      225	 * on ath10k.
      226	 *
      227	 * By limiting the number of refills the replenishing occurs
      228	 * progressively. This in turn makes use of the fact that tasklets are
      229	 * processed in FIFO order. This means actual RX processing can starve
      230	 * out refilling. If there are not enough buffers on the RX ring the FW
      231	 * will not report RX until it is refilled with enough buffers. This
      232	 * automatically balances the load with respect to CPU power.
    232	 *
    233	 * This probably comes at a cost of lower maximum throughput but
    234	 * improves the average and stability.
    235	 */
    236	spin_lock_bh(&htt->rx_ring.lock);
    237	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
    238	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
    239	num_deficit -= num_to_fill;
    240	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
    241	if (ret == -ENOMEM) {
    242		/*
    243		 * Failed to fill it to the desired level -
    244		 * we'll start a timer and try again next time.
    245		 * As long as enough buffers are left in the ring for
    246		 * another A-MPDU rx, no special recovery is needed.
    247		 */
    248		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
    249			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
    250	} else if (num_deficit > 0) {
    251		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
    252			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
    253	}
    254	spin_unlock_bh(&htt->rx_ring.lock);
    255}
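        /* In short: each invocation tops the ring up by at most
         * ATH10K_HTT_MAX_NUM_REFILL buffers. On -ENOMEM the retry timer fires
         * again after HTT_RX_RING_REFILL_RETRY_MS (50 ms); if only a deficit
         * remains it is rescheduled sooner, after
         * HTT_RX_RING_REFILL_RESCHED_MS (5 ms).
         */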
    256
    257static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
    258{
    259	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
    260
    261	ath10k_htt_rx_msdu_buff_replenish(htt);
    262}
    263
    264int ath10k_htt_rx_ring_refill(struct ath10k *ar)
    265{
    266	struct ath10k_htt *htt = &ar->htt;
    267	int ret;
    268
    269	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
    270		return 0;
    271
    272	spin_lock_bh(&htt->rx_ring.lock);
    273	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
    274					      htt->rx_ring.fill_cnt));
    275
    276	if (ret)
    277		ath10k_htt_rx_ring_free(htt);
    278
    279	spin_unlock_bh(&htt->rx_ring.lock);
    280
    281	return ret;
    282}
    283
    284void ath10k_htt_rx_free(struct ath10k_htt *htt)
    285{
    286	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
    287		return;
    288
    289	del_timer_sync(&htt->rx_ring.refill_retry_timer);
    290
    291	skb_queue_purge(&htt->rx_msdus_q);
    292	skb_queue_purge(&htt->rx_in_ord_compl_q);
    293	skb_queue_purge(&htt->tx_fetch_ind_q);
    294
    295	spin_lock_bh(&htt->rx_ring.lock);
    296	ath10k_htt_rx_ring_free(htt);
    297	spin_unlock_bh(&htt->rx_ring.lock);
    298
    299	dma_free_coherent(htt->ar->dev,
    300			  ath10k_htt_get_rx_ring_size(htt),
    301			  ath10k_htt_get_vaddr_ring(htt),
    302			  htt->rx_ring.base_paddr);
    303
    304	dma_free_coherent(htt->ar->dev,
    305			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
    306			  htt->rx_ring.alloc_idx.vaddr,
    307			  htt->rx_ring.alloc_idx.paddr);
    308
    309	kfree(htt->rx_ring.netbufs_ring);
    310}
    311
    312static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
    313{
    314	struct ath10k *ar = htt->ar;
    315	int idx;
    316	struct sk_buff *msdu;
    317
    318	lockdep_assert_held(&htt->rx_ring.lock);
    319
    320	if (htt->rx_ring.fill_cnt == 0) {
    321		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
    322		return NULL;
    323	}
    324
    325	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
    326	msdu = htt->rx_ring.netbufs_ring[idx];
    327	htt->rx_ring.netbufs_ring[idx] = NULL;
    328	ath10k_htt_reset_paddrs_ring(htt, idx);
    329
    330	idx++;
    331	idx &= htt->rx_ring.size_mask;
    332	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
    333	htt->rx_ring.fill_cnt--;
    334
    335	dma_unmap_single(htt->ar->dev,
    336			 ATH10K_SKB_RXCB(msdu)->paddr,
    337			 msdu->len + skb_tailroom(msdu),
    338			 DMA_FROM_DEVICE);
    339	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
    340			msdu->data, msdu->len + skb_tailroom(msdu));
    341
    342	return msdu;
    343}
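        /* Note: the fill and pop paths use separate indices - the fill path
         * advances the producer index published via alloc_idx.vaddr, while the
         * pop above advances the software read index sw_rd_idx.msdu_payld;
         * fill_cnt tracks how many buffers are currently posted to the ring.
         */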
    344
     345/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
    346static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
    347				   struct sk_buff_head *amsdu)
    348{
    349	struct ath10k *ar = htt->ar;
    350	struct ath10k_hw_params *hw = &ar->hw_params;
    351	int msdu_len, msdu_chaining = 0;
    352	struct sk_buff *msdu;
    353	struct htt_rx_desc *rx_desc;
    354	struct rx_attention *rx_desc_attention;
    355	struct rx_frag_info_common *rx_desc_frag_info_common;
    356	struct rx_msdu_start_common *rx_desc_msdu_start_common;
    357	struct rx_msdu_end_common *rx_desc_msdu_end_common;
    358
    359	lockdep_assert_held(&htt->rx_ring.lock);
    360
    361	for (;;) {
    362		int last_msdu, msdu_len_invalid, msdu_chained;
    363
    364		msdu = ath10k_htt_rx_netbuf_pop(htt);
    365		if (!msdu) {
    366			__skb_queue_purge(amsdu);
    367			return -ENOENT;
    368		}
    369
    370		__skb_queue_tail(amsdu, msdu);
    371
    372		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
    373		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
    374		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
    375									      rx_desc);
    376		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
    377		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
    378
    379		/* FIXME: we must report msdu payload since this is what caller
    380		 * expects now
    381		 */
    382		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
    383		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
    384
    385		/*
    386		 * Sanity check - confirm the HW is finished filling in the
    387		 * rx data.
    388		 * If the HW and SW are working correctly, then it's guaranteed
    389		 * that the HW's MAC DMA is done before this point in the SW.
    390		 * To prevent the case that we handle a stale Rx descriptor,
    391		 * just assert for now until we have a way to recover.
    392		 */
    393		if (!(__le32_to_cpu(rx_desc_attention->flags)
    394				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
    395			__skb_queue_purge(amsdu);
    396			return -EIO;
    397		}
    398
    399		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
    400					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
    401					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
    402		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
    403			      RX_MSDU_START_INFO0_MSDU_LENGTH);
    404		msdu_chained = rx_desc_frag_info_common->ring2_more_count;
    405
    406		if (msdu_len_invalid)
    407			msdu_len = 0;
    408
    409		skb_trim(msdu, 0);
    410		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
    411		msdu_len -= msdu->len;
    412
     413		/* Note: Chained buffers do not contain an rx descriptor */
    414		while (msdu_chained--) {
    415			msdu = ath10k_htt_rx_netbuf_pop(htt);
    416			if (!msdu) {
    417				__skb_queue_purge(amsdu);
    418				return -ENOENT;
    419			}
    420
    421			__skb_queue_tail(amsdu, msdu);
    422			skb_trim(msdu, 0);
    423			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
    424			msdu_len -= msdu->len;
    425			msdu_chaining = 1;
    426		}
    427
    428		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
    429				RX_MSDU_END_INFO0_LAST_MSDU;
    430
    431		/* FIXME: why are we skipping the first part of the rx_desc? */
    432		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
    433					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));
    434
    435		if (last_msdu)
    436			break;
    437	}
    438
    439	if (skb_queue_empty(amsdu))
    440		msdu_chaining = -1;
    441
    442	/*
    443	 * Don't refill the ring yet.
    444	 *
    445	 * First, the elements popped here are still in use - it is not
    446	 * safe to overwrite them until the matching call to
    447	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
    448	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
    449	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
    450	 * (something like 3 buffers). Consequently, we'll rely on the txrx
    451	 * SW to tell us when it is done pulling all the PPDU's rx buffers
    452	 * out of the rx ring, and then refill it just once.
    453	 */
    454
    455	return msdu_chaining;
    456}
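        /* Rough picture of the buffer accounting above: the first buffer of an
         * MSDU carries the rx descriptor plus at most ath10k_htt_rx_msdu_size()
         * bytes of payload; each of the ring2_more_count chained buffers that
         * follow carries raw payload only (no rx descriptor), up to
         * HTT_RX_BUF_SIZE bytes, and msdu_len is decremented by every buffer's
         * length until the whole MSDU has been accounted for.
         */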
    457
    458static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
    459					       u64 paddr)
    460{
    461	struct ath10k *ar = htt->ar;
    462	struct ath10k_skb_rxcb *rxcb;
    463	struct sk_buff *msdu;
    464
    465	lockdep_assert_held(&htt->rx_ring.lock);
    466
    467	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
    468	if (!msdu)
    469		return NULL;
    470
    471	rxcb = ATH10K_SKB_RXCB(msdu);
    472	hash_del(&rxcb->hlist);
    473	htt->rx_ring.fill_cnt--;
    474
    475	dma_unmap_single(htt->ar->dev, rxcb->paddr,
    476			 msdu->len + skb_tailroom(msdu),
    477			 DMA_FROM_DEVICE);
    478	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
    479			msdu->data, msdu->len + skb_tailroom(msdu));
    480
    481	return msdu;
    482}
    483
    484static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
    485					       struct sk_buff *frag_list,
    486					       unsigned int frag_len)
    487{
    488	skb_shinfo(skb_head)->frag_list = frag_list;
    489	skb_head->data_len = frag_len;
    490	skb_head->len += skb_head->data_len;
    491}
    492
    493static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
    494					     struct sk_buff *msdu,
    495					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
    496{
    497	struct ath10k *ar = htt->ar;
    498	struct ath10k_hw_params *hw = &ar->hw_params;
    499	u32 paddr;
    500	struct sk_buff *frag_buf;
    501	struct sk_buff *prev_frag_buf;
    502	u8 last_frag;
    503	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
    504	struct htt_rx_desc *rxd;
    505	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
    506
    507	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
    508	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
    509
    510	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
    511	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
    512	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
    513	amsdu_len -= msdu->len;
    514
    515	last_frag = ind_desc->reserved;
    516	if (last_frag) {
    517		if (amsdu_len) {
    518			ath10k_warn(ar, "invalid amsdu len %u, left %d",
    519				    __le16_to_cpu(ind_desc->msdu_len),
    520				    amsdu_len);
    521		}
    522		return 0;
    523	}
    524
    525	ind_desc++;
    526	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
    527	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
    528	if (!frag_buf) {
    529		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
    530		return -ENOENT;
    531	}
    532
    533	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
    534	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
    535
    536	amsdu_len -= frag_buf->len;
    537	prev_frag_buf = frag_buf;
    538	last_frag = ind_desc->reserved;
    539	while (!last_frag) {
    540		ind_desc++;
    541		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
    542		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
    543		if (!frag_buf) {
    544			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
    545				    paddr);
    546			prev_frag_buf->next = NULL;
    547			return -ENOENT;
    548		}
    549
    550		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
    551		last_frag = ind_desc->reserved;
    552		amsdu_len -= frag_buf->len;
    553
    554		prev_frag_buf->next = frag_buf;
    555		prev_frag_buf = frag_buf;
    556	}
    557
    558	if (amsdu_len) {
    559		ath10k_warn(ar, "invalid amsdu len %u, left %d",
    560			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
    561	}
    562
    563	*msdu_desc = ind_desc;
    564
    565	prev_frag_buf->next = NULL;
    566	return 0;
    567}
    568
    569static int
    570ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
    571				  struct sk_buff *msdu,
    572				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
    573{
    574	struct ath10k *ar = htt->ar;
    575	struct ath10k_hw_params *hw = &ar->hw_params;
    576	u64 paddr;
    577	struct sk_buff *frag_buf;
    578	struct sk_buff *prev_frag_buf;
    579	u8 last_frag;
    580	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
    581	struct htt_rx_desc *rxd;
    582	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
    583
    584	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
    585	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
    586
    587	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
    588	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
    589	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
    590	amsdu_len -= msdu->len;
    591
    592	last_frag = ind_desc->reserved;
    593	if (last_frag) {
    594		if (amsdu_len) {
    595			ath10k_warn(ar, "invalid amsdu len %u, left %d",
    596				    __le16_to_cpu(ind_desc->msdu_len),
    597				    amsdu_len);
    598		}
    599		return 0;
    600	}
    601
    602	ind_desc++;
    603	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
    604	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
    605	if (!frag_buf) {
    606		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
    607		return -ENOENT;
    608	}
    609
    610	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
    611	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
    612
    613	amsdu_len -= frag_buf->len;
    614	prev_frag_buf = frag_buf;
    615	last_frag = ind_desc->reserved;
    616	while (!last_frag) {
    617		ind_desc++;
    618		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
    619		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
    620		if (!frag_buf) {
    621			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
    622				    paddr);
    623			prev_frag_buf->next = NULL;
    624			return -ENOENT;
    625		}
    626
    627		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
    628		last_frag = ind_desc->reserved;
    629		amsdu_len -= frag_buf->len;
    630
    631		prev_frag_buf->next = frag_buf;
    632		prev_frag_buf = frag_buf;
    633	}
    634
    635	if (amsdu_len) {
    636		ath10k_warn(ar, "invalid amsdu len %u, left %d",
    637			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
    638	}
    639
    640	*msdu_desc = ind_desc;
    641
    642	prev_frag_buf->next = NULL;
    643	return 0;
    644}
    645
    646static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
    647					  struct htt_rx_in_ord_ind *ev,
    648					  struct sk_buff_head *list)
    649{
    650	struct ath10k *ar = htt->ar;
    651	struct ath10k_hw_params *hw = &ar->hw_params;
    652	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
    653	struct htt_rx_desc *rxd;
    654	struct rx_attention *rxd_attention;
    655	struct sk_buff *msdu;
    656	int msdu_count, ret;
    657	bool is_offload;
    658	u32 paddr;
    659
    660	lockdep_assert_held(&htt->rx_ring.lock);
    661
    662	msdu_count = __le16_to_cpu(ev->msdu_count);
    663	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
    664
    665	while (msdu_count--) {
    666		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
    667
    668		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
    669		if (!msdu) {
    670			__skb_queue_purge(list);
    671			return -ENOENT;
    672		}
    673
    674		if (!is_offload && ar->monitor_arvif) {
    675			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
    676								&msdu_desc);
    677			if (ret) {
    678				__skb_queue_purge(list);
    679				return ret;
    680			}
    681			__skb_queue_tail(list, msdu);
    682			msdu_desc++;
    683			continue;
    684		}
    685
    686		__skb_queue_tail(list, msdu);
    687
    688		if (!is_offload) {
    689			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
    690			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
    691
    692			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
    693
    694			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
    695			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
    696			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
    697
    698			if (!(__le32_to_cpu(rxd_attention->flags) &
    699			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
    700				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
    701				return -EIO;
    702			}
    703		}
    704
    705		msdu_desc++;
    706	}
    707
    708	return 0;
    709}
    710
    711static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
    712					  struct htt_rx_in_ord_ind *ev,
    713					  struct sk_buff_head *list)
    714{
    715	struct ath10k *ar = htt->ar;
    716	struct ath10k_hw_params *hw = &ar->hw_params;
    717	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
    718	struct htt_rx_desc *rxd;
    719	struct rx_attention *rxd_attention;
    720	struct sk_buff *msdu;
    721	int msdu_count, ret;
    722	bool is_offload;
    723	u64 paddr;
    724
    725	lockdep_assert_held(&htt->rx_ring.lock);
    726
    727	msdu_count = __le16_to_cpu(ev->msdu_count);
    728	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
    729
    730	while (msdu_count--) {
    731		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
    732		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
    733		if (!msdu) {
    734			__skb_queue_purge(list);
    735			return -ENOENT;
    736		}
    737
    738		if (!is_offload && ar->monitor_arvif) {
    739			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
    740								&msdu_desc);
    741			if (ret) {
    742				__skb_queue_purge(list);
    743				return ret;
    744			}
    745			__skb_queue_tail(list, msdu);
    746			msdu_desc++;
    747			continue;
    748		}
    749
    750		__skb_queue_tail(list, msdu);
    751
    752		if (!is_offload) {
    753			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
    754			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
    755
    756			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
    757
    758			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
    759			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
    760			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
    761
    762			if (!(__le32_to_cpu(rxd_attention->flags) &
    763			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
    764				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
    765				return -EIO;
    766			}
    767		}
    768
    769		msdu_desc++;
    770	}
    771
    772	return 0;
    773}
    774
    775int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
    776{
    777	struct ath10k *ar = htt->ar;
    778	dma_addr_t paddr;
    779	void *vaddr, *vaddr_ring;
    780	size_t size;
    781	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
    782
    783	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
    784		return 0;
    785
    786	htt->rx_confused = false;
    787
    788	/* XXX: The fill level could be changed during runtime in response to
    789	 * the host processing latency. Is this really worth it?
    790	 */
    791	htt->rx_ring.size = HTT_RX_RING_SIZE;
    792	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
    793	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
    794
    795	if (!is_power_of_2(htt->rx_ring.size)) {
    796		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
    797		return -EINVAL;
    798	}
    799
    800	htt->rx_ring.netbufs_ring =
    801		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
    802			GFP_KERNEL);
    803	if (!htt->rx_ring.netbufs_ring)
    804		goto err_netbuf;
    805
    806	size = ath10k_htt_get_rx_ring_size(htt);
    807
    808	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
    809	if (!vaddr_ring)
    810		goto err_dma_ring;
    811
    812	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
    813	htt->rx_ring.base_paddr = paddr;
    814
    815	vaddr = dma_alloc_coherent(htt->ar->dev,
    816				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
    817				   &paddr, GFP_KERNEL);
    818	if (!vaddr)
    819		goto err_dma_idx;
    820
    821	htt->rx_ring.alloc_idx.vaddr = vaddr;
    822	htt->rx_ring.alloc_idx.paddr = paddr;
    823	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
    824	*htt->rx_ring.alloc_idx.vaddr = 0;
    825
    826	/* Initialize the Rx refill retry timer */
    827	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
    828
    829	spin_lock_init(&htt->rx_ring.lock);
    830
    831	htt->rx_ring.fill_cnt = 0;
    832	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
    833	hash_init(htt->rx_ring.skb_table);
    834
    835	skb_queue_head_init(&htt->rx_msdus_q);
    836	skb_queue_head_init(&htt->rx_in_ord_compl_q);
    837	skb_queue_head_init(&htt->tx_fetch_ind_q);
    838	atomic_set(&htt->num_mpdus_ready, 0);
    839
    840	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
    841		   htt->rx_ring.size, htt->rx_ring.fill_level);
    842	return 0;
    843
    844err_dma_idx:
    845	dma_free_coherent(htt->ar->dev,
    846			  ath10k_htt_get_rx_ring_size(htt),
    847			  vaddr_ring,
    848			  htt->rx_ring.base_paddr);
    849err_dma_ring:
    850	kfree(htt->rx_ring.netbufs_ring);
    851err_netbuf:
    852	return -ENOMEM;
    853}
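        /* Layout set up above: paddrs_ring is a DMA-coherent array of buffer
         * addresses shared with the firmware, netbufs_ring is the host-side
         * shadow array of sk_buff pointers for the same slots, and alloc_idx
         * is a single DMA-coherent producer index that the host publishes
         * after each refill.
         */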
    854
    855static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
    856					  enum htt_rx_mpdu_encrypt_type type)
    857{
    858	switch (type) {
    859	case HTT_RX_MPDU_ENCRYPT_NONE:
    860		return 0;
    861	case HTT_RX_MPDU_ENCRYPT_WEP40:
    862	case HTT_RX_MPDU_ENCRYPT_WEP104:
    863		return IEEE80211_WEP_IV_LEN;
    864	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
    865	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
    866		return IEEE80211_TKIP_IV_LEN;
    867	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
    868		return IEEE80211_CCMP_HDR_LEN;
    869	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
    870		return IEEE80211_CCMP_256_HDR_LEN;
    871	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
    872	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
    873		return IEEE80211_GCMP_HDR_LEN;
    874	case HTT_RX_MPDU_ENCRYPT_WEP128:
    875	case HTT_RX_MPDU_ENCRYPT_WAPI:
    876		break;
    877	}
    878
    879	ath10k_warn(ar, "unsupported encryption type %d\n", type);
    880	return 0;
    881}
    882
    883#define MICHAEL_MIC_LEN 8
    884
    885static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
    886					enum htt_rx_mpdu_encrypt_type type)
    887{
    888	switch (type) {
    889	case HTT_RX_MPDU_ENCRYPT_NONE:
    890	case HTT_RX_MPDU_ENCRYPT_WEP40:
    891	case HTT_RX_MPDU_ENCRYPT_WEP104:
    892	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
    893	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
    894		return 0;
    895	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
    896		return IEEE80211_CCMP_MIC_LEN;
    897	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
    898		return IEEE80211_CCMP_256_MIC_LEN;
    899	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
    900	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
    901		return IEEE80211_GCMP_MIC_LEN;
    902	case HTT_RX_MPDU_ENCRYPT_WEP128:
    903	case HTT_RX_MPDU_ENCRYPT_WAPI:
    904		break;
    905	}
    906
    907	ath10k_warn(ar, "unsupported encryption type %d\n", type);
    908	return 0;
    909}
    910
    911static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
    912					enum htt_rx_mpdu_encrypt_type type)
    913{
    914	switch (type) {
    915	case HTT_RX_MPDU_ENCRYPT_NONE:
    916	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
    917	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
    918	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
    919	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
    920		return 0;
    921	case HTT_RX_MPDU_ENCRYPT_WEP40:
    922	case HTT_RX_MPDU_ENCRYPT_WEP104:
    923		return IEEE80211_WEP_ICV_LEN;
    924	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
    925	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
    926		return IEEE80211_TKIP_ICV_LEN;
    927	case HTT_RX_MPDU_ENCRYPT_WEP128:
    928	case HTT_RX_MPDU_ENCRYPT_WAPI:
    929		break;
    930	}
    931
    932	ath10k_warn(ar, "unsupported encryption type %d\n", type);
    933	return 0;
    934}
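        /* Example of how the three helpers above combine for a decrypted frame
         * (see ath10k_htt_rx_h_undecap_raw()): for AES-CCM (CCMP-128) the
         * crypto param is IEEE80211_CCMP_HDR_LEN bytes at the head of the
         * payload, the MIC is IEEE80211_CCMP_MIC_LEN bytes at the tail and
         * there is no ICV - 8 bytes each, so a fully stripped frame shrinks by
         * 16 bytes.
         */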
    935
    936struct amsdu_subframe_hdr {
    937	u8 dst[ETH_ALEN];
    938	u8 src[ETH_ALEN];
    939	__be16 len;
    940} __packed;
    941
    942#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
    943
    944static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
    945{
    946	u8 ret = 0;
    947
    948	switch (bw) {
    949	case 0:
    950		ret = RATE_INFO_BW_20;
    951		break;
    952	case 1:
    953		ret = RATE_INFO_BW_40;
    954		break;
    955	case 2:
    956		ret = RATE_INFO_BW_80;
    957		break;
    958	case 3:
    959		ret = RATE_INFO_BW_160;
    960		break;
    961	}
    962
    963	return ret;
    964}
    965
    966static void ath10k_htt_rx_h_rates(struct ath10k *ar,
    967				  struct ieee80211_rx_status *status,
    968				  struct htt_rx_desc *rxd)
    969{
    970	struct ath10k_hw_params *hw = &ar->hw_params;
    971	struct rx_attention *rxd_attention;
    972	struct rx_mpdu_start *rxd_mpdu_start;
    973	struct rx_mpdu_end *rxd_mpdu_end;
    974	struct rx_msdu_start_common *rxd_msdu_start_common;
    975	struct rx_msdu_end_common *rxd_msdu_end_common;
    976	struct rx_ppdu_start *rxd_ppdu_start;
    977	struct ieee80211_supported_band *sband;
    978	u8 cck, rate, bw, sgi, mcs, nss;
    979	u8 *rxd_msdu_payload;
    980	u8 preamble = 0;
    981	u8 group_id;
    982	u32 info1, info2, info3;
    983	u32 stbc, nsts_su;
    984
    985	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
    986	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
    987	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
    988	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
    989	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
    990	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
    991	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
    992
    993	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
    994	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
    995	info3 = __le32_to_cpu(rxd_ppdu_start->info3);
    996
    997	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
    998
    999	switch (preamble) {
   1000	case HTT_RX_LEGACY:
    1001		/* To get the legacy rate index the band is required. Since the
    1002		 * band can't be undefined, check that freq is non-zero.
   1003		 */
   1004		if (!status->freq)
   1005			return;
   1006
   1007		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
   1008		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
   1009		rate &= ~RX_PPDU_START_RATE_FLAG;
   1010
   1011		sband = &ar->mac.sbands[status->band];
   1012		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
   1013		break;
   1014	case HTT_RX_HT:
   1015	case HTT_RX_HT_WITH_TXBF:
   1016		/* HT-SIG - Table 20-11 in info2 and info3 */
   1017		mcs = info2 & 0x1F;
   1018		nss = mcs >> 3;
   1019		bw = (info2 >> 7) & 1;
   1020		sgi = (info3 >> 7) & 1;
   1021
   1022		status->rate_idx = mcs;
   1023		status->encoding = RX_ENC_HT;
   1024		if (sgi)
   1025			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
   1026		if (bw)
   1027			status->bw = RATE_INFO_BW_40;
   1028		break;
   1029	case HTT_RX_VHT:
   1030	case HTT_RX_VHT_WITH_TXBF:
   1031		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
   1032		 * TODO check this
   1033		 */
   1034		bw = info2 & 3;
   1035		sgi = info3 & 1;
   1036		stbc = (info2 >> 3) & 1;
   1037		group_id = (info2 >> 4) & 0x3F;
   1038
   1039		if (GROUP_ID_IS_SU_MIMO(group_id)) {
   1040			mcs = (info3 >> 4) & 0x0F;
   1041			nsts_su = ((info2 >> 10) & 0x07);
   1042			if (stbc)
   1043				nss = (nsts_su >> 2) + 1;
   1044			else
   1045				nss = (nsts_su + 1);
   1046		} else {
    1047			/* Hardware doesn't decode VHT-SIG-B into the Rx descriptor,
    1048			 * so it's impossible to decode the MCS. Also, since the
    1049			 * firmware consumes Group Id Management frames, the host
    1050			 * has no knowledge of the group/user position mapping, so
    1051			 * it's impossible to pick the correct Nsts from
    1052			 * VHT-SIG-A1.
   1053			 *
   1054			 * Bandwidth and SGI are valid so report the rateinfo
   1055			 * on best-effort basis.
   1056			 */
   1057			mcs = 0;
   1058			nss = 1;
   1059		}
   1060
   1061		if (mcs > 0x09) {
   1062			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
   1063			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
   1064				    __le32_to_cpu(rxd_attention->flags),
   1065				    __le32_to_cpu(rxd_mpdu_start->info0),
   1066				    __le32_to_cpu(rxd_mpdu_start->info1),
   1067				    __le32_to_cpu(rxd_msdu_start_common->info0),
   1068				    __le32_to_cpu(rxd_msdu_start_common->info1),
   1069				    rxd_ppdu_start->info0,
   1070				    __le32_to_cpu(rxd_ppdu_start->info1),
   1071				    __le32_to_cpu(rxd_ppdu_start->info2),
   1072				    __le32_to_cpu(rxd_ppdu_start->info3),
   1073				    __le32_to_cpu(rxd_ppdu_start->info4));
   1074
   1075			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
   1076				    __le32_to_cpu(rxd_msdu_end_common->info0),
   1077				    __le32_to_cpu(rxd_mpdu_end->info0));
   1078
   1079			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
   1080					"rx desc msdu payload: ",
   1081					rxd_msdu_payload, 50);
   1082		}
   1083
   1084		status->rate_idx = mcs;
   1085		status->nss = nss;
   1086
   1087		if (sgi)
   1088			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
   1089
   1090		status->bw = ath10k_bw_to_mac80211_bw(bw);
   1091		status->encoding = RX_ENC_VHT;
   1092		break;
   1093	default:
   1094		break;
   1095	}
   1096}
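        /* Worked example for the HT-SIG decode above: with info2 = 0x8c the
         * MCS is info2 & 0x1F = 12 and the 40 MHz bit (info2 >> 7) & 1 is set,
         * so the frame would be reported as MCS 12 at RATE_INFO_BW_40; the
         * short GI flag comes from (info3 >> 7) & 1.
         */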
   1097
   1098static struct ieee80211_channel *
   1099ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
   1100{
   1101	struct ath10k_hw_params *hw = &ar->hw_params;
   1102	struct rx_attention *rxd_attention;
   1103	struct rx_msdu_end_common *rxd_msdu_end_common;
   1104	struct rx_mpdu_start *rxd_mpdu_start;
   1105	struct ath10k_peer *peer;
   1106	struct ath10k_vif *arvif;
   1107	struct cfg80211_chan_def def;
   1108	u16 peer_id;
   1109
   1110	lockdep_assert_held(&ar->data_lock);
   1111
   1112	if (!rxd)
   1113		return NULL;
   1114
   1115	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
   1116	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   1117	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
   1118
   1119	if (rxd_attention->flags &
   1120	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
   1121		return NULL;
   1122
   1123	if (!(rxd_msdu_end_common->info0 &
   1124	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
   1125		return NULL;
   1126
   1127	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
   1128		     RX_MPDU_START_INFO0_PEER_IDX);
   1129
   1130	peer = ath10k_peer_find_by_id(ar, peer_id);
   1131	if (!peer)
   1132		return NULL;
   1133
   1134	arvif = ath10k_get_arvif(ar, peer->vdev_id);
   1135	if (WARN_ON_ONCE(!arvif))
   1136		return NULL;
   1137
   1138	if (ath10k_mac_vif_chan(arvif->vif, &def))
   1139		return NULL;
   1140
   1141	return def.chan;
   1142}
   1143
   1144static struct ieee80211_channel *
   1145ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
   1146{
   1147	struct ath10k_vif *arvif;
   1148	struct cfg80211_chan_def def;
   1149
   1150	lockdep_assert_held(&ar->data_lock);
   1151
   1152	list_for_each_entry(arvif, &ar->arvifs, list) {
   1153		if (arvif->vdev_id == vdev_id &&
   1154		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
   1155			return def.chan;
   1156	}
   1157
   1158	return NULL;
   1159}
   1160
   1161static void
   1162ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
   1163			      struct ieee80211_chanctx_conf *conf,
   1164			      void *data)
   1165{
   1166	struct cfg80211_chan_def *def = data;
   1167
   1168	*def = conf->def;
   1169}
   1170
   1171static struct ieee80211_channel *
   1172ath10k_htt_rx_h_any_channel(struct ath10k *ar)
   1173{
   1174	struct cfg80211_chan_def def = {};
   1175
   1176	ieee80211_iter_chan_contexts_atomic(ar->hw,
   1177					    ath10k_htt_rx_h_any_chan_iter,
   1178					    &def);
   1179
   1180	return def.chan;
   1181}
   1182
   1183static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
   1184				    struct ieee80211_rx_status *status,
   1185				    struct htt_rx_desc *rxd,
   1186				    u32 vdev_id)
   1187{
   1188	struct ieee80211_channel *ch;
   1189
   1190	spin_lock_bh(&ar->data_lock);
   1191	ch = ar->scan_channel;
   1192	if (!ch)
   1193		ch = ar->rx_channel;
   1194	if (!ch)
   1195		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
   1196	if (!ch)
   1197		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
   1198	if (!ch)
   1199		ch = ath10k_htt_rx_h_any_channel(ar);
   1200	if (!ch)
   1201		ch = ar->tgt_oper_chan;
   1202	spin_unlock_bh(&ar->data_lock);
   1203
   1204	if (!ch)
   1205		return false;
   1206
   1207	status->band = ch->band;
   1208	status->freq = ch->center_freq;
   1209
   1210	return true;
   1211}
   1212
   1213static void ath10k_htt_rx_h_signal(struct ath10k *ar,
   1214				   struct ieee80211_rx_status *status,
   1215				   struct htt_rx_desc *rxd)
   1216{
   1217	struct ath10k_hw_params *hw = &ar->hw_params;
   1218	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
   1219	int i;
   1220
   1221	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
   1222		status->chains &= ~BIT(i);
   1223
   1224		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
   1225			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
   1226				rxd_ppdu_start->rssi_chains[i].pri20_mhz;
   1227
   1228			status->chains |= BIT(i);
   1229		}
   1230	}
   1231
   1232	/* FIXME: Get real NF */
   1233	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
   1234			 rxd_ppdu_start->rssi_comb;
   1235	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
   1236}
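        /* A per-chain RSSI of 0x80 is treated as "not available" above; valid
         * values are offsets from the assumed fixed noise floor, e.g. with
         * ATH10K_DEFAULT_NOISE_FLOOR at -95 dBm a combined RSSI of 40 yields a
         * reported signal of -55 dBm.
         */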
   1237
   1238static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
   1239				    struct ieee80211_rx_status *status,
   1240				    struct htt_rx_desc *rxd)
   1241{
   1242	struct ath10k_hw_params *hw = &ar->hw_params;
   1243	struct rx_ppdu_end_common *rxd_ppdu_end_common;
   1244
   1245	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
   1246
   1247	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
   1248	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
   1249	 * TSF. Is it worth holding frames until end of PPDU is known?
   1250	 *
   1251	 * FIXME: Can we get/compute 64bit TSF?
   1252	 */
   1253	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
   1254	status->flag |= RX_FLAG_MACTIME_END;
   1255}
   1256
   1257static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
   1258				 struct sk_buff_head *amsdu,
   1259				 struct ieee80211_rx_status *status,
   1260				 u32 vdev_id)
   1261{
   1262	struct sk_buff *first;
   1263	struct ath10k_hw_params *hw = &ar->hw_params;
   1264	struct htt_rx_desc *rxd;
   1265	struct rx_attention *rxd_attention;
   1266	bool is_first_ppdu;
   1267	bool is_last_ppdu;
   1268
   1269	if (skb_queue_empty(amsdu))
   1270		return;
   1271
   1272	first = skb_peek(amsdu);
   1273	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1274				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
   1275
   1276	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
   1277
   1278	is_first_ppdu = !!(rxd_attention->flags &
   1279			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
   1280	is_last_ppdu = !!(rxd_attention->flags &
   1281			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
   1282
   1283	if (is_first_ppdu) {
   1284		/* New PPDU starts so clear out the old per-PPDU status. */
   1285		status->freq = 0;
   1286		status->rate_idx = 0;
   1287		status->nss = 0;
   1288		status->encoding = RX_ENC_LEGACY;
   1289		status->bw = RATE_INFO_BW_20;
   1290
   1291		status->flag &= ~RX_FLAG_MACTIME_END;
   1292		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
   1293
   1294		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
   1295		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
   1296		status->ampdu_reference = ar->ampdu_reference;
   1297
   1298		ath10k_htt_rx_h_signal(ar, status, rxd);
   1299		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
   1300		ath10k_htt_rx_h_rates(ar, status, rxd);
   1301	}
   1302
   1303	if (is_last_ppdu) {
   1304		ath10k_htt_rx_h_mactime(ar, status, rxd);
   1305
   1306		/* set ampdu last segment flag */
   1307		status->flag |= RX_FLAG_AMPDU_IS_LAST;
   1308		ar->ampdu_reference++;
   1309	}
   1310}
   1311
   1312static const char * const tid_to_ac[] = {
   1313	"BE",
   1314	"BK",
   1315	"BK",
   1316	"BE",
   1317	"VI",
   1318	"VI",
   1319	"VO",
   1320	"VO",
   1321};
   1322
   1323static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
   1324{
   1325	u8 *qc;
   1326	int tid;
   1327
   1328	if (!ieee80211_is_data_qos(hdr->frame_control))
   1329		return "";
   1330
   1331	qc = ieee80211_get_qos_ctl(hdr);
   1332	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
   1333	if (tid < 8)
   1334		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
   1335	else
   1336		snprintf(out, size, "tid %d", tid);
   1337
   1338	return out;
   1339}
   1340
   1341static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
   1342				       struct ieee80211_rx_status *rx_status,
   1343				       struct sk_buff *skb)
   1344{
   1345	struct ieee80211_rx_status *status;
   1346
   1347	status = IEEE80211_SKB_RXCB(skb);
   1348	*status = *rx_status;
   1349
   1350	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
   1351}
   1352
   1353static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
   1354{
   1355	struct ieee80211_rx_status *status;
   1356	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
   1357	char tid[32];
   1358
   1359	status = IEEE80211_SKB_RXCB(skb);
   1360
   1361	if (!(ar->filter_flags & FIF_FCSFAIL) &&
   1362	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
   1363		ar->stats.rx_crc_err_drop++;
   1364		dev_kfree_skb_any(skb);
   1365		return;
   1366	}
   1367
   1368	ath10k_dbg(ar, ATH10K_DBG_DATA,
   1369		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
   1370		   skb,
   1371		   skb->len,
   1372		   ieee80211_get_SA(hdr),
   1373		   ath10k_get_tid(hdr, tid, sizeof(tid)),
   1374		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
   1375							"mcast" : "ucast",
   1376		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
   1377		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
   1378		   (status->encoding == RX_ENC_HT) ? "ht" : "",
   1379		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
   1380		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
   1381		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
   1382		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
   1383		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
   1384		   status->rate_idx,
   1385		   status->nss,
   1386		   status->freq,
   1387		   status->band, status->flag,
   1388		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
   1389		   !!(status->flag & RX_FLAG_MMIC_ERROR),
   1390		   !!(status->flag & RX_FLAG_AMSDU_MORE));
   1391	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
   1392			skb->data, skb->len);
   1393	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
   1394	trace_ath10k_rx_payload(ar, skb->data, skb->len);
   1395
   1396	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
   1397}
   1398
   1399static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
   1400				      struct ieee80211_hdr *hdr)
   1401{
   1402	int len = ieee80211_hdrlen(hdr->frame_control);
   1403
   1404	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
   1405		      ar->running_fw->fw_file.fw_features))
   1406		len = round_up(len, 4);
   1407
   1408	return len;
   1409}
   1410
   1411static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
   1412					struct sk_buff *msdu,
   1413					struct ieee80211_rx_status *status,
   1414					enum htt_rx_mpdu_encrypt_type enctype,
   1415					bool is_decrypted,
   1416					const u8 first_hdr[64])
   1417{
   1418	struct ieee80211_hdr *hdr;
   1419	struct ath10k_hw_params *hw = &ar->hw_params;
   1420	struct htt_rx_desc *rxd;
   1421	struct rx_msdu_end_common *rxd_msdu_end_common;
   1422	size_t hdr_len;
   1423	size_t crypto_len;
   1424	bool is_first;
   1425	bool is_last;
   1426	bool msdu_limit_err;
   1427	int bytes_aligned = ar->hw_params.decap_align_bytes;
   1428	u8 *qos;
   1429
   1430	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1431				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   1432
   1433	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   1434	is_first = !!(rxd_msdu_end_common->info0 &
   1435		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
   1436	is_last = !!(rxd_msdu_end_common->info0 &
   1437		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
   1438
   1439	/* Delivered decapped frame:
   1440	 * [802.11 header]
   1441	 * [crypto param] <-- can be trimmed if !fcs_err &&
   1442	 *                    !decrypt_err && !peer_idx_invalid
   1443	 * [amsdu header] <-- only if A-MSDU
   1444	 * [rfc1042/llc]
   1445	 * [payload]
   1446	 * [FCS] <-- at end, needs to be trimmed
   1447	 */
   1448
    1449	/* Some hardware (QCA99x0 variants) limits the number of msdus in an a-msdu
    1450	 * when deaggregating, so that unwanted MSDU-deaggregation is avoided for
    1451	 * error packets. If the limit is exceeded, the hw sends all remaining MSDUs
    1452	 * as a single last MSDU with this msdu limit error set.
   1453	 */
   1454	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
   1455
    1456	/* If an MSDU limit error happens, then don't warn: a partial raw MSDU
    1457	 * without the first MSDU is expected in that case, and is handled later here.
   1458	 */
   1459	/* This probably shouldn't happen but warn just in case */
   1460	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
   1461		return;
   1462
   1463	/* This probably shouldn't happen but warn just in case */
   1464	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
   1465		return;
   1466
   1467	skb_trim(msdu, msdu->len - FCS_LEN);
   1468
   1469	/* Push original 80211 header */
   1470	if (unlikely(msdu_limit_err)) {
   1471		hdr = (struct ieee80211_hdr *)first_hdr;
   1472		hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1473		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
   1474
   1475		if (ieee80211_is_data_qos(hdr->frame_control)) {
   1476			qos = ieee80211_get_qos_ctl(hdr);
   1477			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
   1478		}
   1479
   1480		if (crypto_len)
   1481			memcpy(skb_push(msdu, crypto_len),
   1482			       (void *)hdr + round_up(hdr_len, bytes_aligned),
   1483			       crypto_len);
   1484
   1485		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
   1486	}
   1487
   1488	/* In most cases this will be true for sniffed frames. It makes sense
   1489	 * to deliver them as-is without stripping the crypto param. This is
   1490	 * necessary for software based decryption.
   1491	 *
   1492	 * If there's no error then the frame is decrypted. At least that is
   1493	 * the case for frames that come in via fragmented rx indication.
   1494	 */
   1495	if (!is_decrypted)
   1496		return;
   1497
    1498	/* The payload is decrypted so strip the crypto params. Start from the
    1499	 * tail since hdr is still needed for the length computations below.
   1500	 */
   1501
   1502	hdr = (void *)msdu->data;
   1503
   1504	/* Tail */
   1505	if (status->flag & RX_FLAG_IV_STRIPPED) {
   1506		skb_trim(msdu, msdu->len -
   1507			 ath10k_htt_rx_crypto_mic_len(ar, enctype));
   1508
   1509		skb_trim(msdu, msdu->len -
   1510			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
   1511	} else {
   1512		/* MIC */
   1513		if (status->flag & RX_FLAG_MIC_STRIPPED)
   1514			skb_trim(msdu, msdu->len -
   1515				 ath10k_htt_rx_crypto_mic_len(ar, enctype));
   1516
   1517		/* ICV */
   1518		if (status->flag & RX_FLAG_ICV_STRIPPED)
   1519			skb_trim(msdu, msdu->len -
   1520				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
   1521	}
   1522
   1523	/* MMIC */
   1524	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
   1525	    !ieee80211_has_morefrags(hdr->frame_control) &&
   1526	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
   1527		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
   1528
   1529	/* Head */
   1530	if (status->flag & RX_FLAG_IV_STRIPPED) {
   1531		hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1532		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
   1533
   1534		memmove((void *)msdu->data + crypto_len,
   1535			(void *)msdu->data, hdr_len);
   1536		skb_pull(msdu, crypto_len);
   1537	}
   1538}
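        /* Summary of the stripping order above for decrypted frames: the MIC
         * and ICV are trimmed from the tail first (while the header is still
         * intact), the Michael MIC is trimmed for TKIP when
         * RX_FLAG_MMIC_STRIPPED is set, and finally the IV/crypto param is
         * removed from the head by sliding the 802.11 header forward over it
         * and pulling the skb.
         */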
   1539
   1540static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
   1541					  struct sk_buff *msdu,
   1542					  struct ieee80211_rx_status *status,
   1543					  const u8 first_hdr[64],
   1544					  enum htt_rx_mpdu_encrypt_type enctype)
   1545{
   1546	struct ath10k_hw_params *hw = &ar->hw_params;
   1547	struct ieee80211_hdr *hdr;
   1548	struct htt_rx_desc *rxd;
   1549	size_t hdr_len;
   1550	u8 da[ETH_ALEN];
   1551	u8 sa[ETH_ALEN];
   1552	int l3_pad_bytes;
   1553	int bytes_aligned = ar->hw_params.decap_align_bytes;
   1554
   1555	/* Delivered decapped frame:
   1556	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
   1557	 * [rfc1042/llc]
   1558	 *
   1559	 * Note: The nwifi header doesn't have QoS Control and is
   1560	 * (always?) a 3addr frame.
   1561	 *
   1562	 * Note2: There's no A-MSDU subframe header. Even if it's part
   1563	 * of an A-MSDU.
   1564	 */
   1565
   1566	/* pull decapped header and copy SA & DA */
   1567	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
   1568				    hw->rx_desc_ops->rx_desc_size);
   1569
   1570	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
   1571	skb_put(msdu, l3_pad_bytes);
   1572
   1573	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
   1574
   1575	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
   1576	ether_addr_copy(da, ieee80211_get_DA(hdr));
   1577	ether_addr_copy(sa, ieee80211_get_SA(hdr));
   1578	skb_pull(msdu, hdr_len);
   1579
   1580	/* push original 802.11 header */
   1581	hdr = (struct ieee80211_hdr *)first_hdr;
   1582	hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1583
   1584	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
   1585		memcpy(skb_push(msdu,
   1586				ath10k_htt_rx_crypto_param_len(ar, enctype)),
   1587		       (void *)hdr + round_up(hdr_len, bytes_aligned),
   1588			ath10k_htt_rx_crypto_param_len(ar, enctype));
   1589	}
   1590
   1591	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
   1592
   1593	/* original 802.11 header has a different DA and in
   1594	 * case of 4addr it may also have different SA
   1595	 */
   1596	hdr = (struct ieee80211_hdr *)msdu->data;
   1597	ether_addr_copy(ieee80211_get_DA(hdr), da);
   1598	ether_addr_copy(ieee80211_get_SA(hdr), sa);
   1599}
   1600
   1601static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
   1602					  struct sk_buff *msdu,
   1603					  enum htt_rx_mpdu_encrypt_type enctype)
   1604{
   1605	struct ieee80211_hdr *hdr;
   1606	struct ath10k_hw_params *hw = &ar->hw_params;
   1607	struct htt_rx_desc *rxd;
   1608	struct rx_msdu_end_common *rxd_msdu_end_common;
   1609	u8 *rxd_rx_hdr_status;
   1610	size_t hdr_len, crypto_len;
   1611	void *rfc1042;
   1612	bool is_first, is_last, is_amsdu;
   1613	int bytes_aligned = ar->hw_params.decap_align_bytes;
   1614
   1615	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1616				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   1617
   1618	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   1619	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
   1620	hdr = (void *)rxd_rx_hdr_status;
   1621
   1622	is_first = !!(rxd_msdu_end_common->info0 &
   1623		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
   1624	is_last = !!(rxd_msdu_end_common->info0 &
   1625		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
   1626	is_amsdu = !(is_first && is_last);
   1627
   1628	rfc1042 = hdr;
   1629
   1630	if (is_first) {
   1631		hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1632		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
   1633
   1634		rfc1042 += round_up(hdr_len, bytes_aligned) +
   1635			   round_up(crypto_len, bytes_aligned);
   1636	}
   1637
   1638	if (is_amsdu)
   1639		rfc1042 += sizeof(struct amsdu_subframe_hdr);
   1640
   1641	return rfc1042;
   1642}
   1643
   1644static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
   1645					struct sk_buff *msdu,
   1646					struct ieee80211_rx_status *status,
   1647					const u8 first_hdr[64],
   1648					enum htt_rx_mpdu_encrypt_type enctype)
   1649{
   1650	struct ath10k_hw_params *hw = &ar->hw_params;
   1651	struct ieee80211_hdr *hdr;
   1652	struct ethhdr *eth;
   1653	size_t hdr_len;
   1654	void *rfc1042;
   1655	u8 da[ETH_ALEN];
   1656	u8 sa[ETH_ALEN];
   1657	int l3_pad_bytes;
   1658	struct htt_rx_desc *rxd;
   1659	int bytes_aligned = ar->hw_params.decap_align_bytes;
   1660
   1661	/* Delivered decapped frame:
   1662	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
   1663	 * [payload]
   1664	 */
   1665
   1666	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
   1667	if (WARN_ON_ONCE(!rfc1042))
   1668		return;
   1669
   1670	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1671				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   1672
   1673	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
   1674	skb_put(msdu, l3_pad_bytes);
   1675	skb_pull(msdu, l3_pad_bytes);
   1676
   1677	/* pull decapped header and copy SA & DA */
   1678	eth = (struct ethhdr *)msdu->data;
   1679	ether_addr_copy(da, eth->h_dest);
   1680	ether_addr_copy(sa, eth->h_source);
   1681	skb_pull(msdu, sizeof(struct ethhdr));
   1682
   1683	/* push rfc1042/llc/snap */
   1684	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
   1685	       sizeof(struct rfc1042_hdr));
   1686
   1687	/* push original 802.11 header */
   1688	hdr = (struct ieee80211_hdr *)first_hdr;
   1689	hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1690
   1691	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
   1692		memcpy(skb_push(msdu,
   1693				ath10k_htt_rx_crypto_param_len(ar, enctype)),
   1694		       (void *)hdr + round_up(hdr_len, bytes_aligned),
   1695			ath10k_htt_rx_crypto_param_len(ar, enctype));
   1696	}
   1697
   1698	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
   1699
   1700	/* original 802.11 header has a different DA and in
   1701	 * case of 4addr it may also have different SA
   1702	 */
   1703	hdr = (struct ieee80211_hdr *)msdu->data;
   1704	ether_addr_copy(ieee80211_get_DA(hdr), da);
   1705	ether_addr_copy(ieee80211_get_SA(hdr), sa);
   1706}
   1707
   1708static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
   1709					 struct sk_buff *msdu,
   1710					 struct ieee80211_rx_status *status,
   1711					 const u8 first_hdr[64],
   1712					 enum htt_rx_mpdu_encrypt_type enctype)
   1713{
   1714	struct ath10k_hw_params *hw = &ar->hw_params;
   1715	struct ieee80211_hdr *hdr;
   1716	size_t hdr_len;
   1717	int l3_pad_bytes;
   1718	struct htt_rx_desc *rxd;
   1719	int bytes_aligned = ar->hw_params.decap_align_bytes;
   1720
   1721	/* Delivered decapped frame:
   1722	 * [amsdu header] <-- replaced with 802.11 hdr
   1723	 * [rfc1042/llc]
   1724	 * [payload]
   1725	 */
   1726
   1727	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1728				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   1729
   1730	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
   1731
   1732	skb_put(msdu, l3_pad_bytes);
   1733	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
   1734
   1735	hdr = (struct ieee80211_hdr *)first_hdr;
   1736	hdr_len = ieee80211_hdrlen(hdr->frame_control);
   1737
   1738	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
   1739		memcpy(skb_push(msdu,
   1740				ath10k_htt_rx_crypto_param_len(ar, enctype)),
   1741		       (void *)hdr + round_up(hdr_len, bytes_aligned),
   1742			ath10k_htt_rx_crypto_param_len(ar, enctype));
   1743	}
   1744
   1745	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
   1746}
   1747
   1748static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
   1749				    struct sk_buff *msdu,
   1750				    struct ieee80211_rx_status *status,
   1751				    u8 first_hdr[64],
   1752				    enum htt_rx_mpdu_encrypt_type enctype,
   1753				    bool is_decrypted)
   1754{
   1755	struct ath10k_hw_params *hw = &ar->hw_params;
   1756	struct htt_rx_desc *rxd;
   1757	struct rx_msdu_start_common *rxd_msdu_start_common;
   1758	enum rx_msdu_decap_format decap;
   1759
   1760	/* First msdu's decapped header:
    1761	 * [802.11 header] <-- padded to a multiple of 4 bytes
    1762	 * [crypto param] <-- padded to a multiple of 4 bytes
   1763	 * [amsdu header] <-- only if A-MSDU
   1764	 * [rfc1042/llc]
   1765	 *
   1766	 * Other (2nd, 3rd, ..) msdu's decapped header:
   1767	 * [amsdu header] <-- only if A-MSDU
   1768	 * [rfc1042/llc]
   1769	 */
   1770
   1771	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1772				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   1773
   1774	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
   1775	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
   1776		   RX_MSDU_START_INFO1_DECAP_FORMAT);
   1777
   1778	switch (decap) {
   1779	case RX_MSDU_DECAP_RAW:
   1780		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
   1781					    is_decrypted, first_hdr);
   1782		break;
   1783	case RX_MSDU_DECAP_NATIVE_WIFI:
   1784		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
   1785					      enctype);
   1786		break;
   1787	case RX_MSDU_DECAP_ETHERNET2_DIX:
   1788		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
   1789		break;
   1790	case RX_MSDU_DECAP_8023_SNAP_LLC:
   1791		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
   1792					     enctype);
   1793		break;
   1794	}
   1795}
   1796
   1797static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
   1798{
   1799	struct htt_rx_desc *rxd;
   1800	struct rx_attention *rxd_attention;
   1801	struct rx_msdu_start_common *rxd_msdu_start_common;
   1802	u32 flags, info;
   1803	bool is_ip4, is_ip6;
   1804	bool is_tcp, is_udp;
   1805	bool ip_csum_ok, tcpudp_csum_ok;
   1806
   1807	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1808				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
   1809
   1810	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
   1811	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
   1812	flags = __le32_to_cpu(rxd_attention->flags);
   1813	info = __le32_to_cpu(rxd_msdu_start_common->info1);
   1814
   1815	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
   1816	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
   1817	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
   1818	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
   1819	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
   1820	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
   1821
   1822	if (!is_ip4 && !is_ip6)
   1823		return CHECKSUM_NONE;
   1824	if (!is_tcp && !is_udp)
   1825		return CHECKSUM_NONE;
   1826	if (!ip_csum_ok)
   1827		return CHECKSUM_NONE;
   1828	if (!tcpudp_csum_ok)
   1829		return CHECKSUM_NONE;
   1830
   1831	return CHECKSUM_UNNECESSARY;
   1832}
   1833
   1834static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
   1835					 struct sk_buff *msdu)
   1836{
   1837	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
   1838}
   1839
   1840static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
   1841				  u16 offset,
   1842				  enum htt_rx_mpdu_encrypt_type enctype)
   1843{
   1844	struct ieee80211_hdr *hdr;
   1845	u64 pn = 0;
   1846	u8 *ehdr;
   1847
   1848	hdr = (struct ieee80211_hdr *)(skb->data + offset);
   1849	ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
   1850
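        	/* CCMP header: PN0, PN1, rsvd, key id/ExtIV, PN2, PN3, PN4, PN5 */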
   1851	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
   1852		pn = ehdr[0];
   1853		pn |= (u64)ehdr[1] << 8;
   1854		pn |= (u64)ehdr[4] << 16;
   1855		pn |= (u64)ehdr[5] << 24;
   1856		pn |= (u64)ehdr[6] << 32;
   1857		pn |= (u64)ehdr[7] << 40;
   1858	}
   1859	return pn;
   1860}
   1861
   1862static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
   1863						 struct sk_buff *skb,
   1864						 u16 offset)
   1865{
   1866	struct ieee80211_hdr *hdr;
   1867
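        	/* Fragmented frames must have a unicast DA; the check fails for
        	 * multicast/broadcast receiver addresses.
        	 */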
   1868	hdr = (struct ieee80211_hdr *)(skb->data + offset);
   1869	return !is_multicast_ether_addr(hdr->addr1);
   1870}
   1871
   1872static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
   1873					  struct sk_buff *skb,
   1874					  u16 peer_id,
   1875					  u16 offset,
   1876					  enum htt_rx_mpdu_encrypt_type enctype)
   1877{
   1878	struct ath10k_peer *peer;
   1879	union htt_rx_pn_t *last_pn, new_pn = {0};
   1880	struct ieee80211_hdr *hdr;
   1881	u8 tid, frag_number;
   1882	u32 seq;
   1883
   1884	peer = ath10k_peer_find_by_id(ar, peer_id);
   1885	if (!peer) {
   1886		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
   1887		return false;
   1888	}
   1889
   1890	hdr = (struct ieee80211_hdr *)(skb->data + offset);
   1891	if (ieee80211_is_data_qos(hdr->frame_control))
   1892		tid = ieee80211_get_tid(hdr);
   1893	else
   1894		tid = ATH10K_TXRX_NON_QOS_TID;
   1895
   1896	last_pn = &peer->frag_tids_last_pn[tid];
   1897	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
   1898	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
   1899	seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
   1900
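        	/* The first fragment records the PN and sequence number; each
        	 * subsequent fragment must carry the same sequence number and a
        	 * PN exactly one greater than that of the previous fragment.
        	 */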
   1901	if (frag_number == 0) {
   1902		last_pn->pn48 = new_pn.pn48;
   1903		peer->frag_tids_seq[tid] = seq;
   1904	} else {
   1905		if (seq != peer->frag_tids_seq[tid])
   1906			return false;
   1907
   1908		if (new_pn.pn48 != last_pn->pn48 + 1)
   1909			return false;
   1910
   1911		last_pn->pn48 = new_pn.pn48;
   1912	}
   1913
   1914	return true;
   1915}
   1916
   1917static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
   1918				 struct sk_buff_head *amsdu,
   1919				 struct ieee80211_rx_status *status,
   1920				 bool fill_crypt_header,
   1921				 u8 *rx_hdr,
   1922				 enum ath10k_pkt_rx_err *err,
   1923				 u16 peer_id,
   1924				 bool frag)
   1925{
   1926	struct sk_buff *first;
   1927	struct sk_buff *last;
   1928	struct sk_buff *msdu, *temp;
   1929	struct ath10k_hw_params *hw = &ar->hw_params;
   1930	struct htt_rx_desc *rxd;
   1931	struct rx_attention *rxd_attention;
   1932	struct rx_mpdu_start *rxd_mpdu_start;
   1933
   1934	struct ieee80211_hdr *hdr;
   1935	enum htt_rx_mpdu_encrypt_type enctype;
   1936	u8 first_hdr[64];
   1937	u8 *qos;
   1938	bool has_fcs_err;
   1939	bool has_crypto_err;
   1940	bool has_tkip_err;
   1941	bool has_peer_idx_invalid;
   1942	bool is_decrypted;
   1943	bool is_mgmt;
   1944	u32 attention;
   1945	bool frag_pn_check = true, multicast_check = true;
   1946
   1947	if (skb_queue_empty(amsdu))
   1948		return;
   1949
   1950	first = skb_peek(amsdu);
   1951	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1952				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
   1953
   1954	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
   1955	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
   1956
   1957	is_mgmt = !!(rxd_attention->flags &
   1958		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
   1959
   1960	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
   1961		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
   1962
   1963	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
   1964	 * decapped header. It'll be used for undecapping of each MSDU.
   1965	 */
   1966	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
   1967	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
   1968
   1969	if (rx_hdr)
   1970		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
   1971
   1972	/* Each A-MSDU subframe will use the original header as the base and be
   1973	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
   1974	 */
   1975	hdr = (void *)first_hdr;
   1976
   1977	if (ieee80211_is_data_qos(hdr->frame_control)) {
   1978		qos = ieee80211_get_qos_ctl(hdr);
   1979		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
   1980	}
   1981
   1982	/* Some attention flags are valid only in the last MSDU. */
   1983	last = skb_peek_tail(amsdu);
   1984	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   1985				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);
   1986
   1987	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
   1988	attention = __le32_to_cpu(rxd_attention->flags);
   1989
   1990	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
   1991	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
   1992	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
   1993	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
   1994
   1995	/* Note: If hardware captures an encrypted frame that it can't decrypt,
   1996	 * e.g. due to fcs error, missing peer or invalid key data it will
   1997	 * report the frame as raw.
   1998	 */
   1999	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
   2000			!has_fcs_err &&
   2001			!has_crypto_err &&
   2002			!has_peer_idx_invalid);
   2003
   2004	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
   2005	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
   2006			  RX_FLAG_MMIC_ERROR |
   2007			  RX_FLAG_DECRYPTED |
   2008			  RX_FLAG_IV_STRIPPED |
   2009			  RX_FLAG_ONLY_MONITOR |
   2010			  RX_FLAG_MMIC_STRIPPED);
   2011
   2012	if (has_fcs_err)
   2013		status->flag |= RX_FLAG_FAILED_FCS_CRC;
   2014
   2015	if (has_tkip_err)
   2016		status->flag |= RX_FLAG_MMIC_ERROR;
   2017
   2018	if (err) {
   2019		if (has_fcs_err)
   2020			*err = ATH10K_PKT_RX_ERR_FCS;
   2021		else if (has_tkip_err)
   2022			*err = ATH10K_PKT_RX_ERR_TKIP;
   2023		else if (has_crypto_err)
   2024			*err = ATH10K_PKT_RX_ERR_CRYPT;
   2025		else if (has_peer_idx_invalid)
   2026			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
   2027	}
   2028
   2029	/* Firmware reports all necessary management frames via WMI already.
   2030	 * They are not reported to monitor interfaces at all so pass the ones
   2031	 * coming via HTT to monitor interfaces instead. This simplifies
   2032	 * matters a lot.
   2033	 */
   2034	if (is_mgmt)
   2035		status->flag |= RX_FLAG_ONLY_MONITOR;
   2036
   2037	if (is_decrypted) {
   2038		status->flag |= RX_FLAG_DECRYPTED;
   2039
   2040		if (likely(!is_mgmt))
   2041			status->flag |= RX_FLAG_MMIC_STRIPPED;
   2042
   2043		if (fill_crypt_header)
   2044			status->flag |= RX_FLAG_MIC_STRIPPED |
   2045					RX_FLAG_ICV_STRIPPED;
   2046		else
   2047			status->flag |= RX_FLAG_IV_STRIPPED;
   2048	}
   2049
   2050	skb_queue_walk(amsdu, msdu) {
   2051		if (frag && !fill_crypt_header && is_decrypted &&
   2052		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
   2053			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
   2054								      msdu,
   2055								      peer_id,
   2056								      0,
   2057								      enctype);
   2058
   2059		if (frag)
   2060			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
   2061									       msdu,
   2062									       0);
   2063
   2064		if (!frag_pn_check || !multicast_check) {
   2065			/* Discard the fragment with invalid PN or multicast DA
   2066			 */
   2067			temp = msdu->prev;
   2068			__skb_unlink(msdu, amsdu);
   2069			dev_kfree_skb_any(msdu);
   2070			msdu = temp;
   2071			frag_pn_check = true;
   2072			multicast_check = true;
   2073			continue;
   2074		}
   2075
   2076		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
   2077
   2078		if (frag && !fill_crypt_header &&
   2079		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
   2080			status->flag &= ~RX_FLAG_MMIC_STRIPPED;
   2081
   2082		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
   2083					is_decrypted);
   2084
   2085		/* Undecapping involves copying the original 802.11 header back
   2086		 * to sk_buff. If frame is protected and hardware has decrypted
   2087		 * it then remove the protected bit.
   2088		 */
   2089		if (!is_decrypted)
   2090			continue;
   2091		if (is_mgmt)
   2092			continue;
   2093
   2094		if (fill_crypt_header)
   2095			continue;
   2096
   2097		hdr = (void *)msdu->data;
   2098		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
   2099
   2100		if (frag && !fill_crypt_header &&
   2101		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
   2102			status->flag &= ~RX_FLAG_IV_STRIPPED &
   2103					~RX_FLAG_MMIC_STRIPPED;
   2104	}
   2105}
   2106
   2107static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
   2108				    struct sk_buff_head *amsdu,
   2109				    struct ieee80211_rx_status *status)
   2110{
   2111	struct sk_buff *msdu;
   2112	struct sk_buff *first_subframe;
   2113
   2114	first_subframe = skb_peek(amsdu);
   2115
   2116	while ((msdu = __skb_dequeue(amsdu))) {
   2117		/* Setup per-MSDU flags */
   2118		if (skb_queue_empty(amsdu))
   2119			status->flag &= ~RX_FLAG_AMSDU_MORE;
   2120		else
   2121			status->flag |= RX_FLAG_AMSDU_MORE;
   2122
   2123		if (msdu == first_subframe) {
   2124			first_subframe = NULL;
   2125			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
   2126		} else {
   2127			status->flag |= RX_FLAG_ALLOW_SAME_PN;
   2128		}
   2129
   2130		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
   2131	}
   2132}
   2133
   2134static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
   2135			       unsigned long *unchain_cnt)
   2136{
   2137	struct sk_buff *skb, *first;
   2138	int space;
   2139	int total_len = 0;
   2140	int amsdu_len = skb_queue_len(amsdu);
   2141
    2142	/* TODO: This could perhaps be optimized by using
    2143	 * skb_try_coalesce or a similar method to
    2144	 * decrease copying, or maybe by getting mac80211 to
    2145	 * provide a way to just receive a list of
    2146	 * skbs?
    2147	 */
   2148
   2149	first = __skb_dequeue(amsdu);
   2150
   2151	/* Allocate total length all at once. */
   2152	skb_queue_walk(amsdu, skb)
   2153		total_len += skb->len;
   2154
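        	/* Grow the first skb's tailroom so the remaining msdus can be
        	 * copied into it.
        	 */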
   2155	space = total_len - skb_tailroom(first);
   2156	if ((space > 0) &&
   2157	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
   2158		/* TODO:  bump some rx-oom error stat */
   2159		/* put it back together so we can free the
   2160		 * whole list at once.
   2161		 */
   2162		__skb_queue_head(amsdu, first);
   2163		return -1;
   2164	}
   2165
   2166	/* Walk list again, copying contents into
   2167	 * msdu_head
   2168	 */
   2169	while ((skb = __skb_dequeue(amsdu))) {
   2170		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
   2171					  skb->len);
   2172		dev_kfree_skb_any(skb);
   2173	}
   2174
   2175	__skb_queue_head(amsdu, first);
   2176
   2177	*unchain_cnt += amsdu_len - 1;
   2178
   2179	return 0;
   2180}
   2181
   2182static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
   2183				    struct sk_buff_head *amsdu,
   2184				    unsigned long *drop_cnt,
   2185				    unsigned long *unchain_cnt)
   2186{
   2187	struct sk_buff *first;
   2188	struct ath10k_hw_params *hw = &ar->hw_params;
   2189	struct htt_rx_desc *rxd;
   2190	struct rx_msdu_start_common *rxd_msdu_start_common;
   2191	struct rx_frag_info_common *rxd_frag_info;
   2192	enum rx_msdu_decap_format decap;
   2193
   2194	first = skb_peek(amsdu);
   2195	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   2196				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
   2197
   2198	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
   2199	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
   2200	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
   2201		   RX_MSDU_START_INFO1_DECAP_FORMAT);
   2202
   2203	/* FIXME: Current unchaining logic can only handle simple case of raw
   2204	 * msdu chaining. If decapping is other than raw the chaining may be
   2205	 * more complex and this isn't handled by the current code. Don't even
   2206	 * try re-constructing such frames - it'll be pretty much garbage.
   2207	 */
   2208	if (decap != RX_MSDU_DECAP_RAW ||
   2209	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
   2210		*drop_cnt += skb_queue_len(amsdu);
   2211		__skb_queue_purge(amsdu);
   2212		return;
   2213	}
   2214
   2215	ath10k_unchain_msdu(amsdu, unchain_cnt);
   2216}
   2217
   2218static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
   2219					 struct sk_buff_head *amsdu)
   2220{
   2221	u8 *subframe_hdr;
   2222	struct sk_buff *first;
   2223	bool is_first, is_last;
   2224	struct ath10k_hw_params *hw = &ar->hw_params;
   2225	struct htt_rx_desc *rxd;
   2226	struct rx_msdu_end_common *rxd_msdu_end_common;
   2227	struct rx_mpdu_start *rxd_mpdu_start;
   2228	struct ieee80211_hdr *hdr;
   2229	size_t hdr_len, crypto_len;
   2230	enum htt_rx_mpdu_encrypt_type enctype;
   2231	int bytes_aligned = ar->hw_params.decap_align_bytes;
   2232
   2233	first = skb_peek(amsdu);
   2234
   2235	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   2236				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
   2237
   2238	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   2239	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
   2240	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
   2241
   2242	is_first = !!(rxd_msdu_end_common->info0 &
   2243		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
   2244	is_last = !!(rxd_msdu_end_common->info0 &
   2245		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
   2246
   2247	/* Return in case of non-aggregated msdu */
   2248	if (is_first && is_last)
   2249		return true;
   2250
   2251	/* First msdu flag is not set for the first msdu of the list */
   2252	if (!is_first)
   2253		return false;
   2254
   2255	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
   2256		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
   2257
   2258	hdr_len = ieee80211_hdrlen(hdr->frame_control);
   2259	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
   2260
   2261	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
   2262		       crypto_len;
   2263
    2264	/* Validate that the amsdu has a proper first subframe.
    2265	 * A single msdu can be received as an amsdu when the
    2266	 * unauthenticated amsdu flag of a QoS header gets flipped
    2267	 * in non-SPP A-MSDUs; in such cases the first subframe has
    2268	 * an llc/snap header in place of a valid DA.
    2269	 * Return false if the DA matches the rfc1042 pattern.
    2270	 */
   2271	if (ether_addr_equal(subframe_hdr, rfc1042_header))
   2272		return false;
   2273
   2274	return true;
   2275}
   2276
   2277static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
   2278					struct sk_buff_head *amsdu,
   2279					struct ieee80211_rx_status *rx_status)
   2280{
   2281	if (!rx_status->freq) {
   2282		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
   2283		return false;
   2284	}
   2285
   2286	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
   2287		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
   2288		return false;
   2289	}
   2290
   2291	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
   2292		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
   2293		return false;
   2294	}
   2295
   2296	return true;
   2297}
   2298
   2299static void ath10k_htt_rx_h_filter(struct ath10k *ar,
   2300				   struct sk_buff_head *amsdu,
   2301				   struct ieee80211_rx_status *rx_status,
   2302				   unsigned long *drop_cnt)
   2303{
   2304	if (skb_queue_empty(amsdu))
   2305		return;
   2306
   2307	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
   2308		return;
   2309
   2310	if (drop_cnt)
   2311		*drop_cnt += skb_queue_len(amsdu);
   2312
   2313	__skb_queue_purge(amsdu);
   2314}
   2315
   2316static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
   2317{
   2318	struct ath10k *ar = htt->ar;
   2319	struct ieee80211_rx_status *rx_status = &htt->rx_status;
   2320	struct sk_buff_head amsdu;
   2321	int ret;
   2322	unsigned long drop_cnt = 0;
   2323	unsigned long unchain_cnt = 0;
   2324	unsigned long drop_cnt_filter = 0;
   2325	unsigned long msdus_to_queue, num_msdus;
   2326	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
   2327	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
   2328
   2329	__skb_queue_head_init(&amsdu);
   2330
   2331	spin_lock_bh(&htt->rx_ring.lock);
   2332	if (htt->rx_confused) {
   2333		spin_unlock_bh(&htt->rx_ring.lock);
   2334		return -EIO;
   2335	}
   2336	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
   2337	spin_unlock_bh(&htt->rx_ring.lock);
   2338
   2339	if (ret < 0) {
   2340		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
   2341		__skb_queue_purge(&amsdu);
   2342		/* FIXME: It's probably a good idea to reboot the
   2343		 * device instead of leaving it inoperable.
   2344		 */
   2345		htt->rx_confused = true;
   2346		return ret;
   2347	}
   2348
   2349	num_msdus = skb_queue_len(&amsdu);
   2350
   2351	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
   2352
    2353	/* a positive return value indicates chained msdus */
   2354	if (ret > 0)
   2355		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
   2356
   2357	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
   2358	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
   2359			     false);
   2360	msdus_to_queue = skb_queue_len(&amsdu);
   2361	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
   2362
   2363	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
   2364				       unchain_cnt, drop_cnt, drop_cnt_filter,
   2365				       msdus_to_queue);
   2366
   2367	return 0;
   2368}
   2369
   2370static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
   2371					  union htt_rx_pn_t *pn,
   2372					  int pn_len_bits)
   2373{
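        	/* The 48-bit PN spans pn_31_0 and the low 16 bits of pn_63_32 in
        	 * the HL rx descriptor.
        	 */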
   2374	switch (pn_len_bits) {
   2375	case 48:
   2376		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
   2377			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
   2378		break;
   2379	case 24:
   2380		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
   2381		break;
   2382	}
   2383}
   2384
   2385static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
   2386				   union htt_rx_pn_t *old_pn)
   2387{
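        	/* Replay is detected when the new 48-bit PN does not strictly
        	 * increase over the last one seen.
        	 */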
   2388	return ((new_pn->pn48 & 0xffffffffffffULL) <=
   2389		(old_pn->pn48 & 0xffffffffffffULL));
   2390}
   2391
   2392static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
   2393					     struct ath10k_peer *peer,
   2394					     struct htt_rx_indication_hl *rx)
   2395{
   2396	bool last_pn_valid, pn_invalid = false;
   2397	enum htt_txrx_sec_cast_type sec_index;
   2398	enum htt_security_types sec_type;
   2399	union htt_rx_pn_t new_pn = {0};
   2400	struct htt_hl_rx_desc *rx_desc;
   2401	union htt_rx_pn_t *last_pn;
   2402	u32 rx_desc_info, tid;
   2403	int num_mpdu_ranges;
   2404
   2405	lockdep_assert_held(&ar->data_lock);
   2406
   2407	if (!peer)
   2408		return false;
   2409
   2410	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
   2411		return false;
   2412
   2413	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
   2414			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
   2415
   2416	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
   2417	rx_desc_info = __le32_to_cpu(rx_desc->info);
   2418
   2419	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
   2420		return false;
   2421
   2422	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
   2423	last_pn_valid = peer->tids_last_pn_valid[tid];
   2424	last_pn = &peer->tids_last_pn[tid];
   2425
   2426	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
   2427		sec_index = HTT_TXRX_SEC_MCAST;
   2428	else
   2429		sec_index = HTT_TXRX_SEC_UCAST;
   2430
   2431	sec_type = peer->rx_pn[sec_index].sec_type;
   2432	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
   2433
   2434	if (sec_type != HTT_SECURITY_AES_CCMP &&
   2435	    sec_type != HTT_SECURITY_TKIP &&
   2436	    sec_type != HTT_SECURITY_TKIP_NOMIC)
   2437		return false;
   2438
   2439	if (last_pn_valid)
   2440		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
   2441	else
   2442		peer->tids_last_pn_valid[tid] = true;
   2443
   2444	if (!pn_invalid)
   2445		last_pn->pn48 = new_pn.pn48;
   2446
   2447	return pn_invalid;
   2448}
   2449
   2450static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
   2451					 struct htt_rx_indication_hl *rx,
   2452					 struct sk_buff *skb,
   2453					 enum htt_rx_pn_check_type check_pn_type,
   2454					 enum htt_rx_tkip_demic_type tkip_mic_type)
   2455{
   2456	struct ath10k *ar = htt->ar;
   2457	struct ath10k_peer *peer;
   2458	struct htt_rx_indication_mpdu_range *mpdu_ranges;
   2459	struct fw_rx_desc_hl *fw_desc;
   2460	enum htt_txrx_sec_cast_type sec_index;
   2461	enum htt_security_types sec_type;
   2462	union htt_rx_pn_t new_pn = {0};
   2463	struct htt_hl_rx_desc *rx_desc;
   2464	struct ieee80211_hdr *hdr;
   2465	struct ieee80211_rx_status *rx_status;
   2466	u16 peer_id;
   2467	u8 rx_desc_len;
   2468	int num_mpdu_ranges;
   2469	size_t tot_hdr_len;
   2470	struct ieee80211_channel *ch;
   2471	bool pn_invalid, qos, first_msdu;
   2472	u32 tid, rx_desc_info;
   2473
   2474	peer_id = __le16_to_cpu(rx->hdr.peer_id);
   2475	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
   2476
   2477	spin_lock_bh(&ar->data_lock);
   2478	peer = ath10k_peer_find_by_id(ar, peer_id);
   2479	spin_unlock_bh(&ar->data_lock);
   2480	if (!peer && peer_id != HTT_INVALID_PEERID)
   2481		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
   2482
   2483	if (!peer)
   2484		return true;
   2485
   2486	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
   2487			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
   2488	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
   2489	fw_desc = &rx->fw_desc;
   2490	rx_desc_len = fw_desc->len;
   2491
   2492	if (fw_desc->u.bits.discard) {
   2493		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
   2494		goto err;
   2495	}
   2496
   2497	/* I have not yet seen any case where num_mpdu_ranges > 1.
    2498	 * qcacld does not seem to handle that case either, so we introduce the
    2499	 * same limitation here as well.
   2500	 */
   2501	if (num_mpdu_ranges > 1)
   2502		ath10k_warn(ar,
   2503			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
   2504			    num_mpdu_ranges);
   2505
   2506	if (mpdu_ranges->mpdu_range_status !=
   2507	    HTT_RX_IND_MPDU_STATUS_OK &&
   2508	    mpdu_ranges->mpdu_range_status !=
   2509	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
   2510		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
   2511			   mpdu_ranges->mpdu_range_status);
   2512		goto err;
   2513	}
   2514
   2515	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
   2516	rx_desc_info = __le32_to_cpu(rx_desc->info);
   2517
   2518	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
   2519		sec_index = HTT_TXRX_SEC_MCAST;
   2520	else
   2521		sec_index = HTT_TXRX_SEC_UCAST;
   2522
   2523	sec_type = peer->rx_pn[sec_index].sec_type;
   2524	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
   2525
   2526	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
   2527
   2528	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
   2529		spin_lock_bh(&ar->data_lock);
   2530		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
   2531		spin_unlock_bh(&ar->data_lock);
   2532
   2533		if (pn_invalid)
   2534			goto err;
   2535	}
   2536
   2537	/* Strip off all headers before the MAC header before delivery to
   2538	 * mac80211
   2539	 */
   2540	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
   2541		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
   2542		      sizeof(rx->fw_desc) +
   2543		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
   2544
   2545	skb_pull(skb, tot_hdr_len);
   2546
   2547	hdr = (struct ieee80211_hdr *)skb->data;
   2548	qos = ieee80211_is_data_qos(hdr->frame_control);
   2549
   2550	rx_status = IEEE80211_SKB_RXCB(skb);
   2551	memset(rx_status, 0, sizeof(*rx_status));
   2552
   2553	if (rx->ppdu.combined_rssi == 0) {
   2554		/* SDIO firmware does not provide signal */
   2555		rx_status->signal = 0;
   2556		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
   2557	} else {
   2558		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
   2559			rx->ppdu.combined_rssi;
   2560		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
   2561	}
   2562
   2563	spin_lock_bh(&ar->data_lock);
   2564	ch = ar->scan_channel;
   2565	if (!ch)
   2566		ch = ar->rx_channel;
   2567	if (!ch)
   2568		ch = ath10k_htt_rx_h_any_channel(ar);
   2569	if (!ch)
   2570		ch = ar->tgt_oper_chan;
   2571	spin_unlock_bh(&ar->data_lock);
   2572
   2573	if (ch) {
   2574		rx_status->band = ch->band;
   2575		rx_status->freq = ch->center_freq;
   2576	}
   2577	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
   2578		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
   2579	else
   2580		rx_status->flag |= RX_FLAG_AMSDU_MORE;
   2581
    2582	/* Not entirely sure about this, but all frames from the chipset have
    2583	 * the protected flag set even though they have already been decrypted.
    2584	 * Clearing this flag is necessary in order for mac80211 not to drop
   2585	 * the frame.
   2586	 * TODO: Verify this is always the case or find out a way to check
   2587	 * if there has been hw decryption.
   2588	 */
   2589	if (ieee80211_has_protected(hdr->frame_control)) {
   2590		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
   2591		rx_status->flag |= RX_FLAG_DECRYPTED |
   2592				   RX_FLAG_IV_STRIPPED |
   2593				   RX_FLAG_MMIC_STRIPPED;
   2594
   2595		if (tid < IEEE80211_NUM_TIDS &&
   2596		    first_msdu &&
   2597		    check_pn_type == HTT_RX_PN_CHECK &&
   2598		   (sec_type == HTT_SECURITY_AES_CCMP ||
   2599		    sec_type == HTT_SECURITY_TKIP ||
   2600		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
   2601			u8 offset, *ivp, i;
   2602			s8 keyidx = 0;
   2603			__le64 pn48 = cpu_to_le64(new_pn.pn48);
   2604
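        			/* Rebuild the CCMP/TKIP IV in front of the payload so
        			 * mac80211 can run its own PN replay check on the frame.
        			 */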
   2605			hdr = (struct ieee80211_hdr *)skb->data;
   2606			offset = ieee80211_hdrlen(hdr->frame_control);
   2607			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
   2608			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
   2609
   2610			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
   2611				skb->data, offset);
   2612			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
   2613			ivp = skb->data + offset;
   2614			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
   2615			/* Ext IV */
   2616			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
   2617
   2618			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
   2619				if (peer->keys[i] &&
   2620				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
   2621					keyidx = peer->keys[i]->keyidx;
   2622			}
   2623
   2624			/* Key ID */
   2625			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
   2626
   2627			if (sec_type == HTT_SECURITY_AES_CCMP) {
   2628				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
   2629				/* pn 0, pn 1 */
   2630				memcpy(skb->data + offset, &pn48, 2);
    2631				/* pn 2, pn 3, pn 4, pn 5 */
   2632				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
   2633			} else {
   2634				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
   2635				/* TSC 0 */
   2636				memcpy(skb->data + offset + 2, &pn48, 1);
   2637				/* TSC 1 */
   2638				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
   2639				/* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
   2640				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
   2641			}
   2642		}
   2643	}
   2644
   2645	if (tkip_mic_type == HTT_RX_TKIP_MIC)
   2646		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
   2647				   ~RX_FLAG_MMIC_STRIPPED;
   2648
   2649	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
   2650		rx_status->flag |= RX_FLAG_MMIC_ERROR;
   2651
   2652	if (!qos && tid < IEEE80211_NUM_TIDS) {
   2653		u8 offset;
   2654		__le16 qos_ctrl = 0;
   2655
   2656		hdr = (struct ieee80211_hdr *)skb->data;
   2657		offset = ieee80211_hdrlen(hdr->frame_control);
   2658
   2659		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
   2660		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
   2661		skb_push(skb, IEEE80211_QOS_CTL_LEN);
   2662		qos_ctrl = cpu_to_le16(tid);
   2663		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
   2664	}
   2665
   2666	if (ar->napi.dev)
   2667		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
   2668	else
   2669		ieee80211_rx_ni(ar->hw, skb);
   2670
   2671	/* We have delivered the skb to the upper layers (mac80211) so we
   2672	 * must not free it.
   2673	 */
   2674	return false;
   2675err:
   2676	/* Tell the caller that it must free the skb since we have not
   2677	 * consumed it
   2678	 */
   2679	return true;
   2680}
   2681
   2682static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
   2683					       u16 head_len,
   2684					       u16 hdr_len)
   2685{
   2686	u8 *ivp, *orig_hdr;
   2687
   2688	orig_hdr = skb->data;
   2689	ivp = orig_hdr + hdr_len + head_len;
   2690
   2691	/* the ExtIV bit is always set to 1 for TKIP */
   2692	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
   2693		return -EINVAL;
   2694
   2695	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
   2696	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
   2697	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
   2698	return 0;
   2699}
   2700
   2701static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
   2702						 u16 head_len,
   2703						 u16 hdr_len)
   2704{
   2705	u8 *ivp, *orig_hdr;
   2706
   2707	orig_hdr = skb->data;
   2708	ivp = orig_hdr + hdr_len + head_len;
   2709
   2710	/* the ExtIV bit is always set to 1 for TKIP */
   2711	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
   2712		return -EINVAL;
   2713
   2714	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
   2715	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
   2716	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
   2717	return 0;
   2718}
   2719
   2720static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
   2721					 u16 head_len,
   2722					 u16 hdr_len)
   2723{
   2724	u8 *ivp, *orig_hdr;
   2725
   2726	orig_hdr = skb->data;
   2727	ivp = orig_hdr + hdr_len + head_len;
   2728
   2729	/* the ExtIV bit is always set to 1 for CCMP */
   2730	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
   2731		return -EINVAL;
   2732
   2733	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
   2734	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
   2735	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
   2736	return 0;
   2737}
   2738
   2739static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
   2740					u16 head_len,
   2741					u16 hdr_len)
   2742{
   2743	u8 *orig_hdr;
   2744
   2745	orig_hdr = skb->data;
   2746
   2747	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
   2748		orig_hdr, head_len + hdr_len);
   2749	skb_pull(skb, IEEE80211_WEP_IV_LEN);
   2750	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
   2751	return 0;
   2752}
   2753
   2754static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
   2755					      struct htt_rx_fragment_indication *rx,
   2756					      struct sk_buff *skb)
   2757{
   2758	struct ath10k *ar = htt->ar;
   2759	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
   2760	enum htt_txrx_sec_cast_type sec_index;
   2761	struct htt_rx_indication_hl *rx_hl;
   2762	enum htt_security_types sec_type;
   2763	u32 tid, frag, seq, rx_desc_info;
   2764	union htt_rx_pn_t new_pn = {0};
   2765	struct htt_hl_rx_desc *rx_desc;
   2766	u16 peer_id, sc, hdr_space;
   2767	union htt_rx_pn_t *last_pn;
   2768	struct ieee80211_hdr *hdr;
   2769	int ret, num_mpdu_ranges;
   2770	struct ath10k_peer *peer;
   2771	struct htt_resp *resp;
   2772	size_t tot_hdr_len;
   2773
   2774	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
   2775	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
   2776	skb_trim(skb, skb->len - FCS_LEN);
   2777
   2778	peer_id = __le16_to_cpu(rx->peer_id);
   2779	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
   2780
   2781	spin_lock_bh(&ar->data_lock);
   2782	peer = ath10k_peer_find_by_id(ar, peer_id);
   2783	if (!peer) {
   2784		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
   2785		goto err;
   2786	}
   2787
   2788	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
   2789			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
   2790
   2791	tot_hdr_len = sizeof(struct htt_resp_hdr) +
   2792		      sizeof(rx_hl->hdr) +
   2793		      sizeof(rx_hl->ppdu) +
   2794		      sizeof(rx_hl->prefix) +
   2795		      sizeof(rx_hl->fw_desc) +
   2796		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
   2797
   2798	tid =  MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
   2799	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
   2800	rx_desc_info = __le32_to_cpu(rx_desc->info);
   2801
   2802	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
   2803
   2804	if (is_multicast_ether_addr(hdr->addr1)) {
   2805		/* Discard the fragment with multicast DA */
   2806		goto err;
   2807	}
   2808
   2809	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
   2810		spin_unlock_bh(&ar->data_lock);
   2811		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
   2812						    HTT_RX_NON_PN_CHECK,
   2813						    HTT_RX_NON_TKIP_MIC);
   2814	}
   2815
   2816	if (ieee80211_has_retry(hdr->frame_control))
   2817		goto err;
   2818
   2819	hdr_space = ieee80211_hdrlen(hdr->frame_control);
   2820	sc = __le16_to_cpu(hdr->seq_ctrl);
   2821	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
   2822	frag = sc & IEEE80211_SCTL_FRAG;
   2823
   2824	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
   2825		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
   2826	sec_type = peer->rx_pn[sec_index].sec_type;
   2827	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
   2828
   2829	switch (sec_type) {
   2830	case HTT_SECURITY_TKIP:
   2831		tkip_mic = HTT_RX_TKIP_MIC;
   2832		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
   2833							    tot_hdr_len +
   2834							    rx_hl->fw_desc.len,
   2835							    hdr_space);
   2836		if (ret)
   2837			goto err;
   2838		break;
   2839	case HTT_SECURITY_TKIP_NOMIC:
   2840		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
   2841							  tot_hdr_len +
   2842							  rx_hl->fw_desc.len,
   2843							  hdr_space);
   2844		if (ret)
   2845			goto err;
   2846		break;
   2847	case HTT_SECURITY_AES_CCMP:
   2848		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
   2849						    tot_hdr_len + rx_hl->fw_desc.len,
   2850						    hdr_space);
   2851		if (ret)
   2852			goto err;
   2853		break;
   2854	case HTT_SECURITY_WEP128:
   2855	case HTT_SECURITY_WEP104:
   2856	case HTT_SECURITY_WEP40:
   2857		ret = ath10k_htt_rx_frag_wep_decap(skb,
   2858						   tot_hdr_len + rx_hl->fw_desc.len,
   2859						   hdr_space);
   2860		if (ret)
   2861			goto err;
   2862		break;
   2863	default:
   2864		break;
   2865	}
   2866
   2867	resp = (struct htt_resp *)(skb->data);
   2868
   2869	if (sec_type != HTT_SECURITY_AES_CCMP &&
   2870	    sec_type != HTT_SECURITY_TKIP &&
   2871	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
   2872		spin_unlock_bh(&ar->data_lock);
   2873		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
   2874						    HTT_RX_NON_PN_CHECK,
   2875						    HTT_RX_NON_TKIP_MIC);
   2876	}
   2877
   2878	last_pn = &peer->frag_tids_last_pn[tid];
   2879
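        	/* The first fragment goes through the regular replay check and
        	 * records the PN and sequence number; later CCMP fragments must
        	 * match the recorded sequence number and increment the PN by one.
        	 */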
   2880	if (frag == 0) {
   2881		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
   2882			goto err;
   2883
   2884		last_pn->pn48 = new_pn.pn48;
   2885		peer->frag_tids_seq[tid] = seq;
   2886	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
   2887		if (seq != peer->frag_tids_seq[tid])
   2888			goto err;
   2889
   2890		if (new_pn.pn48 != last_pn->pn48 + 1)
   2891			goto err;
   2892
   2893		last_pn->pn48 = new_pn.pn48;
   2894		last_pn = &peer->tids_last_pn[tid];
   2895		last_pn->pn48 = new_pn.pn48;
   2896	}
   2897
   2898	spin_unlock_bh(&ar->data_lock);
   2899
   2900	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
   2901					    HTT_RX_NON_PN_CHECK, tkip_mic);
   2902
   2903err:
   2904	spin_unlock_bh(&ar->data_lock);
   2905
   2906	/* Tell the caller that it must free the skb since we have not
   2907	 * consumed it
   2908	 */
   2909	return true;
   2910}
   2911
   2912static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
   2913					 struct htt_rx_indication *rx)
   2914{
   2915	struct ath10k *ar = htt->ar;
   2916	struct htt_rx_indication_mpdu_range *mpdu_ranges;
   2917	int num_mpdu_ranges;
   2918	int i, mpdu_count = 0;
   2919	u16 peer_id;
   2920	u8 tid;
   2921
   2922	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
   2923			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
   2924	peer_id = __le16_to_cpu(rx->hdr.peer_id);
   2925	tid =  MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
   2926
   2927	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
   2928
   2929	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
   2930			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
   2931
   2932	for (i = 0; i < num_mpdu_ranges; i++)
   2933		mpdu_count += mpdu_ranges[i].mpdu_count;
   2934
   2935	atomic_add(mpdu_count, &htt->num_mpdus_ready);
   2936
   2937	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
   2938					     num_mpdu_ranges);
   2939}
   2940
   2941static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
   2942				       struct sk_buff *skb)
   2943{
   2944	struct ath10k_htt *htt = &ar->htt;
   2945	struct htt_resp *resp = (struct htt_resp *)skb->data;
   2946	struct htt_tx_done tx_done = {};
   2947	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
   2948	__le16 msdu_id, *msdus;
   2949	bool rssi_enabled = false;
   2950	u8 msdu_count = 0, num_airtime_records, tid;
   2951	int i, htt_pad = 0;
   2952	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
   2953	struct ath10k_peer *peer;
   2954	u16 ppdu_info_offset = 0, peer_id;
   2955	u32 tx_duration;
   2956
   2957	switch (status) {
   2958	case HTT_DATA_TX_STATUS_NO_ACK:
   2959		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
   2960		break;
   2961	case HTT_DATA_TX_STATUS_OK:
   2962		tx_done.status = HTT_TX_COMPL_STATE_ACK;
   2963		break;
   2964	case HTT_DATA_TX_STATUS_DISCARD:
   2965	case HTT_DATA_TX_STATUS_POSTPONE:
   2966	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
   2967		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
   2968		break;
   2969	default:
   2970		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
   2971		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
   2972		break;
   2973	}
   2974
   2975	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
   2976		   resp->data_tx_completion.num_msdus);
   2977
   2978	msdu_count = resp->data_tx_completion.num_msdus;
   2979	msdus = resp->data_tx_completion.msdus;
   2980	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
   2981
   2982	if (rssi_enabled)
   2983		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
   2984							    resp);
   2985
   2986	for (i = 0; i < msdu_count; i++) {
   2987		msdu_id = msdus[i];
   2988		tx_done.msdu_id = __le16_to_cpu(msdu_id);
   2989
   2990		if (rssi_enabled) {
    2991			/* The total number of MSDUs should be even;
    2992			 * if an odd number is sent, firmware fills the
    2993			 * last msdu id with 0xffff
   2994			 */
   2995			if (msdu_count & 0x01) {
   2996				msdu_id = msdus[msdu_count +  i + 1 + htt_pad];
   2997				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
   2998			} else {
   2999				msdu_id = msdus[msdu_count +  i + htt_pad];
   3000				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
   3001			}
   3002		}
   3003
   3004		/* kfifo_put: In practice firmware shouldn't fire off per-CE
   3005		 * interrupt and main interrupt (MSI/-X range case) for the same
   3006		 * HTC service so it should be safe to use kfifo_put w/o lock.
   3007		 *
   3008		 * From kfifo_put() documentation:
   3009		 *  Note that with only one concurrent reader and one concurrent
   3010		 *  writer, you don't need extra locking to use these macro.
   3011		 */
   3012		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
   3013			ath10k_txrx_tx_unref(htt, &tx_done);
   3014		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
   3015			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
   3016				    tx_done.msdu_id, tx_done.status);
   3017			ath10k_txrx_tx_unref(htt, &tx_done);
   3018		}
   3019	}
   3020
   3021	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
   3022		return;
   3023
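        	/* msdu ids are padded to an even count; when ack RSSI reporting
        	 * is enabled an equal-sized block of RSSI values follows them,
        	 * doubling the offset to the ppdu duration records.
        	 */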
   3024	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
   3025
   3026	if (rssi_enabled)
   3027		ppdu_info_offset += ppdu_info_offset;
   3028
   3029	if (resp->data_tx_completion.flags2 &
   3030	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
   3031		ppdu_info_offset += 2;
   3032
   3033	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
   3034	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
   3035					__le32_to_cpu(ppdu_info->info0));
   3036
   3037	for (i = 0; i < num_airtime_records; i++) {
   3038		struct htt_data_tx_ppdu_dur *ppdu_dur;
   3039		u32 info0;
   3040
   3041		ppdu_dur = &ppdu_info->ppdu_dur[i];
   3042		info0 = __le32_to_cpu(ppdu_dur->info0);
   3043
   3044		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
   3045				    info0);
   3046		rcu_read_lock();
   3047		spin_lock_bh(&ar->data_lock);
   3048
   3049		peer = ath10k_peer_find_by_id(ar, peer_id);
   3050		if (!peer || !peer->sta) {
   3051			spin_unlock_bh(&ar->data_lock);
   3052			rcu_read_unlock();
   3053			continue;
   3054		}
   3055
   3056		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
   3057						IEEE80211_QOS_CTL_TID_MASK;
   3058		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
   3059
   3060		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
   3061
   3062		spin_unlock_bh(&ar->data_lock);
   3063		rcu_read_unlock();
   3064	}
   3065}
   3066
   3067static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
   3068{
   3069	struct htt_rx_addba *ev = &resp->rx_addba;
   3070	struct ath10k_peer *peer;
   3071	struct ath10k_vif *arvif;
   3072	u16 info0, tid, peer_id;
   3073
   3074	info0 = __le16_to_cpu(ev->info0);
   3075	tid = MS(info0, HTT_RX_BA_INFO0_TID);
   3076	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
   3077
   3078	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3079		   "htt rx addba tid %u peer_id %u size %u\n",
   3080		   tid, peer_id, ev->window_size);
   3081
   3082	spin_lock_bh(&ar->data_lock);
   3083	peer = ath10k_peer_find_by_id(ar, peer_id);
   3084	if (!peer) {
   3085		ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
   3086			    peer_id);
   3087		spin_unlock_bh(&ar->data_lock);
   3088		return;
   3089	}
   3090
   3091	arvif = ath10k_get_arvif(ar, peer->vdev_id);
   3092	if (!arvif) {
   3093		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
   3094			    peer->vdev_id);
   3095		spin_unlock_bh(&ar->data_lock);
   3096		return;
   3097	}
   3098
   3099	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3100		   "htt rx start rx ba session sta %pM tid %u size %u\n",
   3101		   peer->addr, tid, ev->window_size);
   3102
   3103	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
   3104	spin_unlock_bh(&ar->data_lock);
   3105}
   3106
   3107static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
   3108{
   3109	struct htt_rx_delba *ev = &resp->rx_delba;
   3110	struct ath10k_peer *peer;
   3111	struct ath10k_vif *arvif;
   3112	u16 info0, tid, peer_id;
   3113
   3114	info0 = __le16_to_cpu(ev->info0);
   3115	tid = MS(info0, HTT_RX_BA_INFO0_TID);
   3116	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
   3117
   3118	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3119		   "htt rx delba tid %u peer_id %u\n",
   3120		   tid, peer_id);
   3121
   3122	spin_lock_bh(&ar->data_lock);
   3123	peer = ath10k_peer_find_by_id(ar, peer_id);
   3124	if (!peer) {
    3125		ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
   3126			    peer_id);
   3127		spin_unlock_bh(&ar->data_lock);
   3128		return;
   3129	}
   3130
   3131	arvif = ath10k_get_arvif(ar, peer->vdev_id);
   3132	if (!arvif) {
    3133		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
   3134			    peer->vdev_id);
   3135		spin_unlock_bh(&ar->data_lock);
   3136		return;
   3137	}
   3138
   3139	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3140		   "htt rx stop rx ba session sta %pM tid %u\n",
   3141		   peer->addr, tid);
   3142
   3143	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
   3144	spin_unlock_bh(&ar->data_lock);
   3145}
   3146
   3147static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
   3148				       struct sk_buff_head *list,
   3149				       struct sk_buff_head *amsdu)
   3150{
   3151	struct sk_buff *msdu;
   3152	struct htt_rx_desc *rxd;
   3153	struct rx_msdu_end_common *rxd_msdu_end_common;
   3154
   3155	if (skb_queue_empty(list))
   3156		return -ENOBUFS;
   3157
   3158	if (WARN_ON(!skb_queue_empty(amsdu)))
   3159		return -EINVAL;
   3160
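        	/* Move msdus into the amsdu queue until one with the LAST_MSDU
        	 * flag is found; an incomplete A-MSDU is spliced back onto the
        	 * list and -EAGAIN returned.
        	 */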
   3161	while ((msdu = __skb_dequeue(list))) {
   3162		__skb_queue_tail(amsdu, msdu);
   3163
   3164		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   3165					    (void *)msdu->data -
   3166					    hw->rx_desc_ops->rx_desc_size);
   3167
   3168		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   3169		if (rxd_msdu_end_common->info0 &
   3170		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
   3171			break;
   3172	}
   3173
   3174	msdu = skb_peek_tail(amsdu);
   3175	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
   3176				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
   3177
   3178	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
   3179	if (!(rxd_msdu_end_common->info0 &
   3180	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
   3181		skb_queue_splice_init(amsdu, list);
   3182		return -EAGAIN;
   3183	}
   3184
   3185	return 0;
   3186}
   3187
   3188static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
   3189					    struct sk_buff *skb)
   3190{
   3191	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
   3192
   3193	if (!ieee80211_has_protected(hdr->frame_control))
   3194		return;
   3195
   3196	/* Offloaded frames are already decrypted but firmware insists they are
   3197	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
   3198	 * will drop the frame.
   3199	 */
   3200
   3201	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
   3202	status->flag |= RX_FLAG_DECRYPTED |
   3203			RX_FLAG_IV_STRIPPED |
   3204			RX_FLAG_MMIC_STRIPPED;
   3205}
   3206
   3207static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
   3208				       struct sk_buff_head *list)
   3209{
   3210	struct ath10k_htt *htt = &ar->htt;
   3211	struct ieee80211_rx_status *status = &htt->rx_status;
   3212	struct htt_rx_offload_msdu *rx;
   3213	struct sk_buff *msdu;
   3214	size_t offset;
   3215
   3216	while ((msdu = __skb_dequeue(list))) {
   3217		/* Offloaded frames don't have Rx descriptor. Instead they have
   3218		 * a short meta information header.
   3219		 */
   3220
   3221		rx = (void *)msdu->data;
   3222
   3223		skb_put(msdu, sizeof(*rx));
   3224		skb_pull(msdu, sizeof(*rx));
   3225
   3226		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
   3227			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
   3228			dev_kfree_skb_any(msdu);
   3229			continue;
   3230		}
   3231
   3232		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
   3233
    3234		/* Offloaded rx header length isn't a multiple of 2 or 4, so the
   3235		 * actual payload is unaligned. Align the frame.  Otherwise
   3236		 * mac80211 complains.  This shouldn't reduce performance much
   3237		 * because these offloaded frames are rare.
   3238		 */
   3239		offset = 4 - ((unsigned long)msdu->data & 3);
   3240		skb_put(msdu, offset);
   3241		memmove(msdu->data + offset, msdu->data, msdu->len);
   3242		skb_pull(msdu, offset);
   3243
   3244		/* FIXME: The frame is NWifi. Re-construct QoS Control
   3245		 * if possible later.
   3246		 */
   3247
   3248		memset(status, 0, sizeof(*status));
   3249		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
   3250
   3251		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
   3252		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
   3253		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
   3254	}
   3255}
   3256
   3257static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
   3258{
   3259	struct ath10k_htt *htt = &ar->htt;
   3260	struct htt_resp *resp = (void *)skb->data;
   3261	struct ieee80211_rx_status *status = &htt->rx_status;
   3262	struct sk_buff_head list;
   3263	struct sk_buff_head amsdu;
   3264	u16 peer_id;
   3265	u16 msdu_count;
   3266	u8 vdev_id;
   3267	u8 tid;
   3268	bool offload;
   3269	bool frag;
   3270	int ret;
   3271
   3272	lockdep_assert_held(&htt->rx_ring.lock);
   3273
   3274	if (htt->rx_confused)
   3275		return -EIO;
   3276
   3277	skb_pull(skb, sizeof(resp->hdr));
   3278	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
   3279
   3280	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
   3281	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
   3282	vdev_id = resp->rx_in_ord_ind.vdev_id;
   3283	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
   3284	offload = !!(resp->rx_in_ord_ind.info &
   3285			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
   3286	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
   3287
   3288	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3289		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
   3290		   vdev_id, peer_id, tid, offload, frag, msdu_count);
   3291
   3292	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
   3293		ath10k_warn(ar, "dropping invalid in order rx indication\n");
   3294		return -EINVAL;
   3295	}
   3296
   3297	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
   3298	 * extracted and processed.
   3299	 */
   3300	__skb_queue_head_init(&list);
   3301	if (ar->hw_params.target_64bit)
   3302		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
   3303						     &list);
   3304	else
   3305		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
   3306						     &list);
   3307
   3308	if (ret < 0) {
   3309		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
   3310		htt->rx_confused = true;
   3311		return -EIO;
   3312	}
   3313
   3314	/* Offloaded frames are very different and need to be handled
   3315	 * separately.
   3316	 */
   3317	if (offload)
   3318		ath10k_htt_rx_h_rx_offload(ar, &list);
   3319
   3320	while (!skb_queue_empty(&list)) {
   3321		__skb_queue_head_init(&amsdu);
   3322		ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
   3323		switch (ret) {
   3324		case 0:
    3325			/* Note: The in-order indication may report interleaved
    3326			 * frames from different PPDUs, so the rx rate reported
    3327			 * to mac80211 isn't accurate/reliable. It's still
   3328			 * better to report something than nothing though. This
   3329			 * should still give an idea about rx rate to the user.
   3330			 */
   3331			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
   3332			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
   3333			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
   3334					     NULL, peer_id, frag);
   3335			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
   3336			break;
   3337		case -EAGAIN:
   3338			fallthrough;
   3339		default:
   3340			/* Should not happen. */
   3341			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
   3342			htt->rx_confused = true;
   3343			__skb_queue_purge(&list);
   3344			return -EIO;
   3345		}
   3346	}
   3347	return ret;
   3348}
   3349
   3350static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
   3351						   const __le32 *resp_ids,
   3352						   int num_resp_ids)
   3353{
   3354	int i;
   3355	u32 resp_id;
   3356
   3357	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
   3358		   num_resp_ids);
   3359
   3360	for (i = 0; i < num_resp_ids; i++) {
   3361		resp_id = le32_to_cpu(resp_ids[i]);
   3362
   3363		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
   3364			   resp_id);
   3365
   3366		/* TODO: free resp_id */
   3367	}
   3368}
   3369
   3370static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
   3371{
   3372	struct ieee80211_hw *hw = ar->hw;
   3373	struct ieee80211_txq *txq;
   3374	struct htt_resp *resp = (struct htt_resp *)skb->data;
   3375	struct htt_tx_fetch_record *record;
   3376	size_t len;
   3377	size_t max_num_bytes;
   3378	size_t max_num_msdus;
   3379	size_t num_bytes;
   3380	size_t num_msdus;
   3381	const __le32 *resp_ids;
   3382	u16 num_records;
   3383	u16 num_resp_ids;
   3384	u16 peer_id;
   3385	u8 tid;
   3386	int ret;
   3387	int i;
   3388	bool may_tx;
   3389
   3390	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
   3391
   3392	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
   3393	if (unlikely(skb->len < len)) {
   3394		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
   3395		return;
   3396	}
   3397
   3398	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
   3399	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
   3400
   3401	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
   3402	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
   3403
   3404	if (unlikely(skb->len < len)) {
   3405		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
   3406		return;
   3407	}
   3408
   3409	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
   3410		   num_records, num_resp_ids,
   3411		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
   3412
   3413	if (!ar->htt.tx_q_state.enabled) {
   3414		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
   3415		return;
   3416	}
   3417
   3418	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
   3419		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
   3420		return;
   3421	}
   3422
   3423	rcu_read_lock();
   3424
   3425	for (i = 0; i < num_records; i++) {
   3426		record = &resp->tx_fetch_ind.records[i];
   3427		peer_id = MS(le16_to_cpu(record->info),
   3428			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
   3429		tid = MS(le16_to_cpu(record->info),
   3430			 HTT_TX_FETCH_RECORD_INFO_TID);
   3431		max_num_msdus = le16_to_cpu(record->num_msdus);
   3432		max_num_bytes = le32_to_cpu(record->num_bytes);
   3433
   3434		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
   3435			   i, peer_id, tid, max_num_msdus, max_num_bytes);
   3436
   3437		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
   3438		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
   3439			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
   3440				    peer_id, tid);
   3441			continue;
   3442		}
   3443
   3444		spin_lock_bh(&ar->data_lock);
   3445		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
   3446		spin_unlock_bh(&ar->data_lock);
   3447
    3448		/* It is okay to release the lock and use txq because the RCU
    3449		 * read lock is held.
   3450		 */
   3451
   3452		if (unlikely(!txq)) {
   3453			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
   3454				    peer_id, tid);
   3455			continue;
   3456		}
   3457
   3458		num_msdus = 0;
   3459		num_bytes = 0;
   3460
   3461		ieee80211_txq_schedule_start(hw, txq->ac);
   3462		may_tx = ieee80211_txq_may_transmit(hw, txq);
   3463		while (num_msdus < max_num_msdus &&
   3464		       num_bytes < max_num_bytes) {
   3465			if (!may_tx)
   3466				break;
   3467
   3468			ret = ath10k_mac_tx_push_txq(hw, txq);
   3469			if (ret < 0)
   3470				break;
   3471
   3472			num_msdus++;
   3473			num_bytes += ret;
   3474		}
   3475		ieee80211_return_txq(hw, txq, false);
   3476		ieee80211_txq_schedule_end(hw, txq->ac);
   3477
   3478		record->num_msdus = cpu_to_le16(num_msdus);
   3479		record->num_bytes = cpu_to_le32(num_bytes);
   3480
   3481		ath10k_htt_tx_txq_recalc(hw, txq);
   3482	}
   3483
   3484	rcu_read_unlock();
   3485
   3486	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
   3487	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
   3488
   3489	ret = ath10k_htt_tx_fetch_resp(ar,
   3490				       resp->tx_fetch_ind.token,
   3491				       resp->tx_fetch_ind.fetch_seq_num,
   3492				       resp->tx_fetch_ind.records,
   3493				       num_records);
   3494	if (unlikely(ret)) {
   3495		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
   3496			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
   3497		/* FIXME: request fw restart */
   3498	}
   3499
   3500	ath10k_htt_tx_txq_sync(ar);
   3501}
   3502
   3503static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
   3504					   struct sk_buff *skb)
   3505{
   3506	const struct htt_resp *resp = (void *)skb->data;
   3507	size_t len;
   3508	int num_resp_ids;
   3509
   3510	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
   3511
   3512	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
   3513	if (unlikely(skb->len < len)) {
   3514		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
   3515		return;
   3516	}
   3517
   3518	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
   3519	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
   3520
   3521	if (unlikely(skb->len < len)) {
   3522		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
   3523		return;
   3524	}
   3525
   3526	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
   3527					       resp->tx_fetch_confirm.resp_ids,
   3528					       num_resp_ids);
   3529}
   3530
   3531static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
   3532					     struct sk_buff *skb)
   3533{
   3534	const struct htt_resp *resp = (void *)skb->data;
   3535	const struct htt_tx_mode_switch_record *record;
   3536	struct ieee80211_txq *txq;
   3537	struct ath10k_txq *artxq;
   3538	size_t len;
   3539	size_t num_records;
   3540	enum htt_tx_mode_switch_mode mode;
   3541	bool enable;
   3542	u16 info0;
   3543	u16 info1;
   3544	u16 threshold;
   3545	u16 peer_id;
   3546	u8 tid;
   3547	int i;
   3548
   3549	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
   3550
   3551	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
   3552	if (unlikely(skb->len < len)) {
   3553		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
   3554		return;
   3555	}
   3556
   3557	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
   3558	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
   3559
   3560	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
    3561	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
   3562	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
   3563	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
   3564
   3565	ath10k_dbg(ar, ATH10K_DBG_HTT,
   3566		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
   3567		   info0, info1, enable, num_records, mode, threshold);
   3568
   3569	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
   3570
   3571	if (unlikely(skb->len < len)) {
   3572		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
   3573		return;
   3574	}
   3575
   3576	switch (mode) {
   3577	case HTT_TX_MODE_SWITCH_PUSH:
   3578	case HTT_TX_MODE_SWITCH_PUSH_PULL:
   3579		break;
   3580	default:
   3581		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
   3582			    mode);
   3583		return;
   3584	}
   3585
   3586	if (!enable)
   3587		return;
   3588
   3589	ar->htt.tx_q_state.enabled = enable;
   3590	ar->htt.tx_q_state.mode = mode;
   3591	ar->htt.tx_q_state.num_push_allowed = threshold;
   3592
   3593	rcu_read_lock();
   3594
   3595	for (i = 0; i < num_records; i++) {
   3596		record = &resp->tx_mode_switch_ind.records[i];
   3597		info0 = le16_to_cpu(record->info0);
   3598		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
   3599		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
   3600
   3601		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
   3602		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
   3603			ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
   3604				    peer_id, tid);
   3605			continue;
   3606		}
   3607
   3608		spin_lock_bh(&ar->data_lock);
   3609		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
   3610		spin_unlock_bh(&ar->data_lock);
   3611
    3612		/* It is okay to release the lock and use txq because the RCU
    3613		 * read lock is held.
   3614		 */
   3615
   3616		if (unlikely(!txq)) {
   3617			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
   3618				    peer_id, tid);
   3619			continue;
   3620		}
   3621
   3622		spin_lock_bh(&ar->htt.tx_lock);
   3623		artxq = (void *)txq->drv_priv;
   3624		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
   3625		spin_unlock_bh(&ar->htt.tx_lock);
   3626	}
   3627
   3628	rcu_read_unlock();
   3629
   3630	ath10k_mac_tx_push_pending(ar);
   3631}
   3632
   3633void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
   3634{
   3635	bool release;
   3636
   3637	release = ath10k_htt_t2h_msg_handler(ar, skb);
   3638
   3639	/* Free the indication buffer */
   3640	if (release)
   3641		dev_kfree_skb_any(skb);
   3642}
   3643
   3644static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
   3645{
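	/* CCK rates 1, 2, 5.5 (reported by firmware as 5) and 11 Mbps followed
	 * by the OFDM rates 6..54 Mbps; the returned value is the index into
	 * this table.
	 */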
   3646	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
   3647					  18, 24, 36, 48, 54};
   3648	int i;
   3649
   3650	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
   3651		if (rate == legacy_rates[i])
   3652			return i;
   3653	}
   3654
   3655	ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
   3656	return -EINVAL;
   3657}
   3658
   3659static void
   3660ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
   3661				    struct ath10k_sta *arsta,
   3662				    struct ath10k_per_peer_tx_stats *pstats,
   3663				    s8 legacy_rate_idx)
   3664{
   3665	struct rate_info *txrate = &arsta->txrate;
   3666	struct ath10k_htt_tx_stats *tx_stats;
   3667	int idx, ht_idx, gi, mcs, bw, nss;
   3668	unsigned long flags;
   3669
   3670	if (!arsta->tx_stats)
   3671		return;
   3672
   3673	tx_stats = arsta->tx_stats;
   3674	flags = txrate->flags;
   3675	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
   3676	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
   3677	bw = txrate->bw;
   3678	nss = txrate->nss;
   3679	ht_idx = mcs + (nss - 1) * 8;
   3680	idx = mcs * 8 + 8 * 10 * (nss - 1);
   3681	idx += bw * 2 + gi;
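	/* The flat rate_table index computed above packs (nss, mcs, bw, gi) as
	 * 80 * (nss - 1) + 8 * mcs + 2 * bw + gi, leaving room for ten MCS
	 * values per spatial stream and four bandwidths with two guard
	 * intervals each; ht_idx is the usual mcs + 8 * (nss - 1) HT index.
	 */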
   3682
   3683#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
   3684
   3685	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
   3686		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
   3687		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
   3688		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
   3689		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
   3690		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
   3691		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
   3692	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
   3693		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
   3694		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
   3695		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
   3696		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
   3697		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
   3698		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
   3699	} else {
   3700		mcs = legacy_rate_idx;
   3701
   3702		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
   3703		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
   3704		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
   3705		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
   3706		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
   3707		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
   3708	}
   3709
   3710	if (ATH10K_HW_AMPDU(pstats->flags)) {
   3711		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
   3712
   3713		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
   3714			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
   3715				pstats->succ_bytes + pstats->retry_bytes;
   3716			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
   3717				pstats->succ_pkts + pstats->retry_pkts;
   3718		} else {
   3719			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
   3720				pstats->succ_bytes + pstats->retry_bytes;
   3721			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
   3722				pstats->succ_pkts + pstats->retry_pkts;
   3723		}
   3724		STATS_OP_FMT(AMPDU).bw[0][bw] +=
   3725			pstats->succ_bytes + pstats->retry_bytes;
   3726		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
   3727			pstats->succ_bytes + pstats->retry_bytes;
   3728		STATS_OP_FMT(AMPDU).gi[0][gi] +=
   3729			pstats->succ_bytes + pstats->retry_bytes;
   3730		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
   3731			pstats->succ_bytes + pstats->retry_bytes;
   3732		STATS_OP_FMT(AMPDU).bw[1][bw] +=
   3733			pstats->succ_pkts + pstats->retry_pkts;
   3734		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
   3735			pstats->succ_pkts + pstats->retry_pkts;
   3736		STATS_OP_FMT(AMPDU).gi[1][gi] +=
   3737			pstats->succ_pkts + pstats->retry_pkts;
   3738		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
   3739			pstats->succ_pkts + pstats->retry_pkts;
   3740	} else {
   3741		tx_stats->ack_fails +=
   3742				ATH10K_HW_BA_FAIL(pstats->flags);
   3743	}
   3744
   3745	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
   3746	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
   3747	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
   3748
   3749	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
   3750	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
   3751	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
   3752
   3753	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
   3754	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
   3755	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
   3756
   3757	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
   3758	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
   3759	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
   3760
   3761	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
   3762	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
   3763	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
   3764
   3765	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
   3766	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
   3767	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
   3768
   3769	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
   3770		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
   3771		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
   3772		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
   3773		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
   3774		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
   3775		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
   3776	}
   3777
   3778	tx_stats->tx_duration += pstats->duration;
   3779}
   3780
   3781static void
   3782ath10k_update_per_peer_tx_stats(struct ath10k *ar,
   3783				struct ieee80211_sta *sta,
   3784				struct ath10k_per_peer_tx_stats *peer_stats)
   3785{
   3786	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
   3787	struct ieee80211_chanctx_conf *conf = NULL;
   3788	u8 rate = 0, sgi;
   3789	s8 rate_idx = 0;
   3790	bool skip_auto_rate;
   3791	struct rate_info txrate;
   3792
   3793	lockdep_assert_held(&ar->data_lock);
   3794
   3795	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
   3796	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
   3797	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
   3798	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
   3799	sgi = ATH10K_HW_GI(peer_stats->flags);
   3800	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
   3801
    3802	/* Firmware's rate control skips broadcast/management frames
    3803	 * if the host has configured fixed rates and in some other special cases.
   3804	 */
   3805	if (skip_auto_rate)
   3806		return;
   3807
   3808	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
   3809		ath10k_warn(ar, "Invalid VHT mcs %d peer stats",  txrate.mcs);
   3810		return;
   3811	}
   3812
   3813	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
   3814	    (txrate.mcs > 7 || txrate.nss < 1)) {
   3815		ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
   3816			    txrate.mcs, txrate.nss);
   3817		return;
   3818	}
   3819
   3820	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
   3821	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
   3822	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
   3823	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
   3824		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
   3825		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
   3826		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
   3827			rate = 5;
   3828		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
   3829		if (rate_idx < 0)
   3830			return;
   3831		arsta->txrate.legacy = rate;
   3832	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
   3833		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
   3834		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
   3835	} else {
   3836		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
   3837		arsta->txrate.mcs = txrate.mcs;
   3838	}
   3839
   3840	switch (txrate.flags) {
   3841	case WMI_RATE_PREAMBLE_OFDM:
   3842		if (arsta->arvif && arsta->arvif->vif)
   3843			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
   3844		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
   3845			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
   3846		break;
   3847	case WMI_RATE_PREAMBLE_CCK:
   3848		arsta->tx_info.status.rates[0].idx = rate_idx;
   3849		if (sgi)
   3850			arsta->tx_info.status.rates[0].flags |=
   3851				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
   3852				 IEEE80211_TX_RC_SHORT_GI);
   3853		break;
   3854	case WMI_RATE_PREAMBLE_HT:
   3855		arsta->tx_info.status.rates[0].idx =
   3856				txrate.mcs + ((txrate.nss - 1) * 8);
   3857		if (sgi)
   3858			arsta->tx_info.status.rates[0].flags |=
   3859					IEEE80211_TX_RC_SHORT_GI;
   3860		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
   3861		break;
   3862	case WMI_RATE_PREAMBLE_VHT:
   3863		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
   3864				       txrate.mcs, txrate.nss);
   3865		if (sgi)
   3866			arsta->tx_info.status.rates[0].flags |=
   3867						IEEE80211_TX_RC_SHORT_GI;
   3868		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
   3869		break;
   3870	}
   3871
   3872	arsta->txrate.nss = txrate.nss;
   3873	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
   3874	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
   3875	if (sgi)
   3876		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
   3877
   3878	switch (arsta->txrate.bw) {
   3879	case RATE_INFO_BW_40:
   3880		arsta->tx_info.status.rates[0].flags |=
   3881				IEEE80211_TX_RC_40_MHZ_WIDTH;
   3882		break;
   3883	case RATE_INFO_BW_80:
   3884		arsta->tx_info.status.rates[0].flags |=
   3885				IEEE80211_TX_RC_80_MHZ_WIDTH;
   3886		break;
   3887	}
   3888
   3889	if (peer_stats->succ_pkts) {
   3890		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
   3891		arsta->tx_info.status.rates[0].count = 1;
   3892		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
   3893	}
   3894
   3895	if (ar->htt.disable_tx_comp) {
   3896		arsta->tx_failed += peer_stats->failed_pkts;
   3897		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
   3898			   arsta->tx_failed);
   3899	}
   3900
   3901	arsta->tx_retries += peer_stats->retry_pkts;
   3902	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
   3903
   3904	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
   3905		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
   3906						    rate_idx);
   3907}
   3908
   3909static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
   3910					struct sk_buff *skb)
   3911{
   3912	struct htt_resp *resp = (struct htt_resp *)skb->data;
   3913	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
   3914	struct htt_per_peer_tx_stats_ind *tx_stats;
   3915	struct ieee80211_sta *sta;
   3916	struct ath10k_peer *peer;
   3917	int peer_id, i;
   3918	u8 ppdu_len, num_ppdu;
   3919
   3920	num_ppdu = resp->peer_tx_stats.num_ppdu;
   3921	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
   3922
   3923	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
   3924		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
   3925		return;
   3926	}
   3927
   3928	tx_stats = (struct htt_per_peer_tx_stats_ind *)
   3929			(resp->peer_tx_stats.payload);
   3930	peer_id = __le16_to_cpu(tx_stats->peer_id);
   3931
   3932	rcu_read_lock();
   3933	spin_lock_bh(&ar->data_lock);
   3934	peer = ath10k_peer_find_by_id(ar, peer_id);
   3935	if (!peer || !peer->sta) {
   3936		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
   3937			    peer_id);
   3938		goto out;
   3939	}
   3940
   3941	sta = peer->sta;
   3942	for (i = 0; i < num_ppdu; i++) {
   3943		tx_stats = (struct htt_per_peer_tx_stats_ind *)
   3944			   (resp->peer_tx_stats.payload + i * ppdu_len);
   3945
   3946		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
   3947		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
   3948		p_tx_stats->failed_bytes =
   3949				__le32_to_cpu(tx_stats->failed_bytes);
   3950		p_tx_stats->ratecode = tx_stats->ratecode;
   3951		p_tx_stats->flags = tx_stats->flags;
   3952		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
   3953		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
   3954		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
   3955		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
   3956
   3957		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
   3958	}
   3959
   3960out:
   3961	spin_unlock_bh(&ar->data_lock);
   3962	rcu_read_unlock();
   3963}
   3964
   3965static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
   3966{
   3967	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
   3968	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
   3969	struct ath10k_10_2_peer_tx_stats *tx_stats;
   3970	struct ieee80211_sta *sta;
   3971	struct ath10k_peer *peer;
   3972	u16 log_type = __le16_to_cpu(hdr->log_type);
   3973	u32 peer_id = 0, i;
   3974
   3975	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
   3976		return;
   3977
   3978	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
   3979		    ATH10K_10_2_TX_STATS_OFFSET);
   3980
   3981	if (!tx_stats->tx_ppdu_cnt)
   3982		return;
   3983
   3984	peer_id = tx_stats->peer_id;
   3985
   3986	rcu_read_lock();
   3987	spin_lock_bh(&ar->data_lock);
   3988	peer = ath10k_peer_find_by_id(ar, peer_id);
   3989	if (!peer || !peer->sta) {
   3990		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
   3991			    peer_id);
   3992		goto out;
   3993	}
   3994
   3995	sta = peer->sta;
   3996	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
   3997		p_tx_stats->succ_bytes =
   3998			__le16_to_cpu(tx_stats->success_bytes[i]);
   3999		p_tx_stats->retry_bytes =
   4000			__le16_to_cpu(tx_stats->retry_bytes[i]);
   4001		p_tx_stats->failed_bytes =
   4002			__le16_to_cpu(tx_stats->failed_bytes[i]);
   4003		p_tx_stats->ratecode = tx_stats->ratecode[i];
   4004		p_tx_stats->flags = tx_stats->flags[i];
   4005		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
   4006		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
   4007		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
   4008
   4009		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
   4010	}
   4011	spin_unlock_bh(&ar->data_lock);
   4012	rcu_read_unlock();
   4013
   4014	return;
   4015
   4016out:
   4017	spin_unlock_bh(&ar->data_lock);
   4018	rcu_read_unlock();
   4019}
   4020
   4021static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
   4022{
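	/* TKIP and AES-CCMP use a 48-bit packet number; other security types
	 * get no PN length and therefore no replay tracking here.
	 */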
   4023	switch (sec_type) {
   4024	case HTT_SECURITY_TKIP:
   4025	case HTT_SECURITY_TKIP_NOMIC:
   4026	case HTT_SECURITY_AES_CCMP:
   4027		return 48;
   4028	default:
   4029		return 0;
   4030	}
   4031}
   4032
   4033static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
   4034					  struct htt_security_indication *ev)
   4035{
   4036	enum htt_txrx_sec_cast_type sec_index;
   4037	enum htt_security_types sec_type;
   4038	struct ath10k_peer *peer;
   4039
   4040	spin_lock_bh(&ar->data_lock);
   4041
   4042	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
   4043	if (!peer) {
   4044		ath10k_warn(ar, "failed to find peer id %d for security indication",
   4045			    __le16_to_cpu(ev->peer_id));
   4046		goto out;
   4047	}
   4048
   4049	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
   4050
   4051	if (ev->flags & HTT_SECURITY_IS_UNICAST)
   4052		sec_index = HTT_TXRX_SEC_UCAST;
   4053	else
   4054		sec_index = HTT_TXRX_SEC_MCAST;
   4055
   4056	peer->rx_pn[sec_index].sec_type = sec_type;
   4057	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
   4058
   4059	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
   4060	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
   4061
   4062out:
   4063	spin_unlock_bh(&ar->data_lock);
   4064}
   4065
   4066bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
   4067{
   4068	struct ath10k_htt *htt = &ar->htt;
   4069	struct htt_resp *resp = (struct htt_resp *)skb->data;
   4070	enum htt_t2h_msg_type type;
   4071
   4072	/* confirm alignment */
   4073	if (!IS_ALIGNED((unsigned long)skb->data, 4))
   4074		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
   4075
   4076	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
   4077		   resp->hdr.msg_type);
   4078
   4079	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
    4080		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
    4081			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
   4082		return true;
   4083	}
   4084	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
   4085
   4086	switch (type) {
   4087	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
   4088		htt->target_version_major = resp->ver_resp.major;
   4089		htt->target_version_minor = resp->ver_resp.minor;
   4090		complete(&htt->target_version_received);
   4091		break;
   4092	}
   4093	case HTT_T2H_MSG_TYPE_RX_IND:
   4094		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
   4095			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
   4096		} else {
   4097			skb_queue_tail(&htt->rx_indication_head, skb);
   4098			return false;
   4099		}
   4100		break;
   4101	case HTT_T2H_MSG_TYPE_PEER_MAP: {
   4102		struct htt_peer_map_event ev = {
   4103			.vdev_id = resp->peer_map.vdev_id,
   4104			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
   4105		};
   4106		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
   4107		ath10k_peer_map_event(htt, &ev);
   4108		break;
   4109	}
   4110	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
   4111		struct htt_peer_unmap_event ev = {
   4112			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
   4113		};
   4114		ath10k_peer_unmap_event(htt, &ev);
   4115		break;
   4116	}
   4117	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
   4118		struct htt_tx_done tx_done = {};
   4119		struct ath10k_htt *htt = &ar->htt;
   4120		struct ath10k_htc *htc = &ar->htc;
   4121		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
   4122		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
   4123		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
   4124
   4125		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
   4126
   4127		switch (status) {
   4128		case HTT_MGMT_TX_STATUS_OK:
   4129			tx_done.status = HTT_TX_COMPL_STATE_ACK;
   4130			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
   4131				     ar->wmi.svc_map) &&
   4132			    (resp->mgmt_tx_completion.flags &
   4133			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
   4134				tx_done.ack_rssi =
   4135				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
   4136					  info);
   4137			}
   4138			break;
   4139		case HTT_MGMT_TX_STATUS_RETRY:
   4140			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
   4141			break;
   4142		case HTT_MGMT_TX_STATUS_DROP:
   4143			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
   4144			break;
   4145		}
   4146
   4147		if (htt->disable_tx_comp) {
   4148			spin_lock_bh(&htc->tx_lock);
   4149			ep->tx_credits++;
   4150			spin_unlock_bh(&htc->tx_lock);
   4151		}
   4152
   4153		status = ath10k_txrx_tx_unref(htt, &tx_done);
   4154		if (!status) {
   4155			spin_lock_bh(&htt->tx_lock);
   4156			ath10k_htt_tx_mgmt_dec_pending(htt);
   4157			spin_unlock_bh(&htt->tx_lock);
   4158		}
   4159		break;
   4160	}
   4161	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
   4162		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
   4163		break;
   4164	case HTT_T2H_MSG_TYPE_SEC_IND: {
   4165		struct ath10k *ar = htt->ar;
   4166		struct htt_security_indication *ev = &resp->security_indication;
   4167
   4168		ath10k_htt_rx_sec_ind_handler(ar, ev);
   4169		ath10k_dbg(ar, ATH10K_DBG_HTT,
   4170			   "sec ind peer_id %d unicast %d type %d\n",
   4171			  __le16_to_cpu(ev->peer_id),
   4172			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
   4173			  MS(ev->flags, HTT_SECURITY_TYPE));
   4174		complete(&ar->install_key_done);
   4175		break;
   4176	}
   4177	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
   4178		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
   4179				skb->data, skb->len);
   4180		atomic_inc(&htt->num_mpdus_ready);
   4181
   4182		return ath10k_htt_rx_proc_rx_frag_ind(htt,
   4183						      &resp->rx_frag_ind,
   4184						      skb);
   4185	}
   4186	case HTT_T2H_MSG_TYPE_TEST:
   4187		break;
   4188	case HTT_T2H_MSG_TYPE_STATS_CONF:
   4189		trace_ath10k_htt_stats(ar, skb->data, skb->len);
   4190		break;
   4191	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
   4192		/* Firmware can return tx frames if it's unable to fully
   4193		 * process them and suspects host may be able to fix it. ath10k
   4194		 * sends all tx frames as already inspected so this shouldn't
   4195		 * happen unless fw has a bug.
   4196		 */
   4197		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
   4198		break;
   4199	case HTT_T2H_MSG_TYPE_RX_ADDBA:
   4200		ath10k_htt_rx_addba(ar, resp);
   4201		break;
   4202	case HTT_T2H_MSG_TYPE_RX_DELBA:
   4203		ath10k_htt_rx_delba(ar, resp);
   4204		break;
   4205	case HTT_T2H_MSG_TYPE_PKTLOG: {
   4206		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
   4207					skb->len -
   4208					offsetof(struct htt_resp,
   4209						 pktlog_msg.payload));
   4210
   4211		if (ath10k_peer_stats_enabled(ar))
   4212			ath10k_fetch_10_2_tx_stats(ar,
   4213						   resp->pktlog_msg.payload);
   4214		break;
   4215	}
   4216	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
   4217		/* Ignore this event because mac80211 takes care of Rx
   4218		 * aggregation reordering.
   4219		 */
   4220		break;
   4221	}
   4222	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
   4223		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
   4224		return false;
   4225	}
   4226	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
   4227		struct ath10k_htt *htt = &ar->htt;
   4228		struct ath10k_htc *htc = &ar->htc;
   4229		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
   4230		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
   4231		int htt_credit_delta;
   4232
   4233		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
   4234		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
   4235			htt_credit_delta = -htt_credit_delta;
   4236
   4237		ath10k_dbg(ar, ATH10K_DBG_HTT,
   4238			   "htt credit update delta %d\n",
   4239			   htt_credit_delta);
   4240
   4241		if (htt->disable_tx_comp) {
   4242			spin_lock_bh(&htc->tx_lock);
   4243			ep->tx_credits += htt_credit_delta;
   4244			spin_unlock_bh(&htc->tx_lock);
   4245			ath10k_dbg(ar, ATH10K_DBG_HTT,
   4246				   "htt credit total %d\n",
   4247				   ep->tx_credits);
   4248			ep->ep_ops.ep_tx_credits(htc->ar);
   4249		}
   4250		break;
   4251	}
   4252	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
   4253		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
   4254		u32 freq = __le32_to_cpu(resp->chan_change.freq);
   4255
   4256		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
   4257		ath10k_dbg(ar, ATH10K_DBG_HTT,
   4258			   "htt chan change freq %u phymode %s\n",
   4259			   freq, ath10k_wmi_phymode_str(phymode));
   4260		break;
   4261	}
   4262	case HTT_T2H_MSG_TYPE_AGGR_CONF:
   4263		break;
   4264	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
   4265		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
   4266
   4267		if (!tx_fetch_ind) {
   4268			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
   4269			break;
   4270		}
   4271		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
   4272		break;
   4273	}
   4274	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
   4275		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
   4276		break;
   4277	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
   4278		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
   4279		break;
   4280	case HTT_T2H_MSG_TYPE_PEER_STATS:
   4281		ath10k_htt_fetch_peer_stats(ar, skb);
   4282		break;
   4283	case HTT_T2H_MSG_TYPE_EN_STATS:
   4284	default:
   4285		ath10k_warn(ar, "htt event (%d) not handled\n",
   4286			    resp->hdr.msg_type);
   4287		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
   4288				skb->data, skb->len);
   4289		break;
   4290	}
   4291	return true;
   4292}
   4293EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
   4294
   4295void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
   4296					     struct sk_buff *skb)
   4297{
   4298	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
   4299	dev_kfree_skb_any(skb);
   4300}
   4301EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
   4302
   4303static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
   4304{
   4305	struct sk_buff *skb;
   4306
   4307	while (quota < budget) {
   4308		if (skb_queue_empty(&ar->htt.rx_msdus_q))
   4309			break;
   4310
   4311		skb = skb_dequeue(&ar->htt.rx_msdus_q);
   4312		if (!skb)
   4313			break;
   4314		ath10k_process_rx(ar, skb);
   4315		quota++;
   4316	}
   4317
   4318	return quota;
   4319}
   4320
   4321int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
   4322{
   4323	struct htt_resp *resp;
   4324	struct ath10k_htt *htt = &ar->htt;
   4325	struct sk_buff *skb;
   4326	bool release;
   4327	int quota;
   4328
   4329	for (quota = 0; quota < budget; quota++) {
   4330		skb = skb_dequeue(&htt->rx_indication_head);
   4331		if (!skb)
   4332			break;
   4333
   4334		resp = (struct htt_resp *)skb->data;
   4335
   4336		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
   4337						       &resp->rx_ind_hl,
   4338						       skb,
   4339						       HTT_RX_PN_CHECK,
   4340						       HTT_RX_NON_TKIP_MIC);
   4341
   4342		if (release)
   4343			dev_kfree_skb_any(skb);
   4344
   4345		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
   4346			   skb_queue_len(&htt->rx_indication_head));
   4347	}
   4348	return quota;
   4349}
   4350EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
   4351
   4352int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
   4353{
   4354	struct ath10k_htt *htt = &ar->htt;
   4355	struct htt_tx_done tx_done = {};
   4356	struct sk_buff_head tx_ind_q;
   4357	struct sk_buff *skb;
   4358	unsigned long flags;
   4359	int quota = 0, done, ret;
   4360	bool resched_napi = false;
   4361
   4362	__skb_queue_head_init(&tx_ind_q);
   4363
   4364	/* Process pending frames before dequeuing more data
   4365	 * from hardware.
   4366	 */
   4367	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
   4368	if (quota == budget) {
   4369		resched_napi = true;
   4370		goto exit;
   4371	}
   4372
   4373	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
   4374		spin_lock_bh(&htt->rx_ring.lock);
   4375		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
   4376		spin_unlock_bh(&htt->rx_ring.lock);
   4377
   4378		dev_kfree_skb_any(skb);
   4379		if (ret == -EIO) {
   4380			resched_napi = true;
   4381			goto exit;
   4382		}
   4383	}
   4384
   4385	while (atomic_read(&htt->num_mpdus_ready)) {
   4386		ret = ath10k_htt_rx_handle_amsdu(htt);
   4387		if (ret == -EIO) {
   4388			resched_napi = true;
   4389			goto exit;
   4390		}
   4391		atomic_dec(&htt->num_mpdus_ready);
   4392	}
   4393
   4394	/* Deliver received data after processing data from hardware */
   4395	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
   4396
   4397	/* From NAPI documentation:
   4398	 *  The napi poll() function may also process TX completions, in which
   4399	 *  case if it processes the entire TX ring then it should count that
   4400	 *  work as the rest of the budget.
   4401	 */
   4402	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
   4403		quota = budget;
   4404
   4405	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
   4406	 * From kfifo_get() documentation:
   4407	 *  Note that with only one concurrent reader and one concurrent writer,
   4408	 *  you don't need extra locking to use these macro.
   4409	 */
   4410	while (kfifo_get(&htt->txdone_fifo, &tx_done))
   4411		ath10k_txrx_tx_unref(htt, &tx_done);
   4412
   4413	ath10k_mac_tx_push_pending(ar);
   4414
   4415	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
   4416	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
   4417	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
   4418
   4419	while ((skb = __skb_dequeue(&tx_ind_q))) {
   4420		ath10k_htt_rx_tx_fetch_ind(ar, skb);
   4421		dev_kfree_skb_any(skb);
   4422	}
   4423
   4424exit:
   4425	ath10k_htt_rx_msdu_buff_replenish(htt);
   4426	/* In case of rx failure or more data to read, report budget
   4427	 * to reschedule NAPI poll
   4428	 */
   4429	done = resched_napi ? budget : quota;
   4430
   4431	return done;
   4432}
   4433EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
   4434
   4435static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
   4436	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
   4437	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
   4438	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
   4439	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
   4440	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
   4441};
   4442
   4443static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
   4444	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
   4445	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
   4446	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
   4447	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
   4448	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
   4449};
   4450
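/* High-latency targets (ATH10K_DEV_TYPE_HL, e.g. SDIO) receive frames inside
 * the HTT rx indication itself, so they do not use the paddr-based rx ring
 * ops above and only provide the fragment indication handler.
 */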
   4451static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
   4452	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
   4453};
   4454
   4455void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
   4456{
   4457	struct ath10k *ar = htt->ar;
   4458
   4459	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
   4460		htt->rx_ops = &htt_rx_ops_hl;
   4461	else if (ar->hw_params.target_64bit)
   4462		htt->rx_ops = &htt_rx_ops_64;
   4463	else
   4464		htt->rx_ops = &htt_rx_ops_32;
   4465}