cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dp_tx.c (36702B)


      1// SPDX-License-Identifier: BSD-3-Clause-Clear
      2/*
      3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
      4 */
      5
      6#include "core.h"
      7#include "dp_tx.h"
      8#include "debug.h"
      9#include "debugfs_sta.h"
     10#include "hw.h"
     11#include "peer.h"
     12#include "mac.h"
     13
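        /* Choose the TCL encapsulation type for a frame: RAW when the
         * device runs in raw mode, ETHERNET when mac80211 offloads
         * 802.11 encapsulation to hardware, NATIVE_WIFI otherwise.
         */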
     14static enum hal_tcl_encap_type
     15ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
     16{
     17	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
     18	struct ath11k_base *ab = arvif->ar->ab;
     19
     20	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
     21		return HAL_TCL_ENCAP_TYPE_RAW;
     22
     23	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
     24		return HAL_TCL_ENCAP_TYPE_ETHERNET;
     25
     26	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
     27}
     28
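        /* Convert an 802.11 QoS data frame to native-wifi format by
         * stripping the QoS control field and clearing the QoS-data
         * subtype bit in the frame control.
         */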
     29static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
     30{
     31	struct ieee80211_hdr *hdr = (void *)skb->data;
     32	u8 *qos_ctl;
     33
     34	if (!ieee80211_is_data_qos(hdr->frame_control))
     35		return;
     36
     37	qos_ctl = ieee80211_get_qos_ctl(hdr);
     38	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
     39		skb->data, (void *)qos_ctl - (void *)skb->data);
     40	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
     41
     42	hdr = (void *)skb->data;
     43	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
     44}
     45
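        /* Derive the TID from skb->priority for QoS and HW-encap frames;
         * non-QoS frames map to the dedicated non-QoS TID.
         */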
     46static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
     47{
     48	struct ieee80211_hdr *hdr = (void *)skb->data;
     49	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
     50
     51	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
     52		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
     53	else if (!ieee80211_is_data_qos(hdr->frame_control))
     54		return HAL_DESC_REO_NON_QOS_TID;
     55	else
     56		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
     57}
     58
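        /* Map a WLAN cipher suite to the matching HAL encrypt type,
         * defaulting to open (no encryption) for unknown suites.
         */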
     59enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
     60{
     61	switch (cipher) {
     62	case WLAN_CIPHER_SUITE_WEP40:
     63		return HAL_ENCRYPT_TYPE_WEP_40;
     64	case WLAN_CIPHER_SUITE_WEP104:
     65		return HAL_ENCRYPT_TYPE_WEP_104;
     66	case WLAN_CIPHER_SUITE_TKIP:
     67		return HAL_ENCRYPT_TYPE_TKIP_MIC;
     68	case WLAN_CIPHER_SUITE_CCMP:
     69		return HAL_ENCRYPT_TYPE_CCMP_128;
     70	case WLAN_CIPHER_SUITE_CCMP_256:
     71		return HAL_ENCRYPT_TYPE_CCMP_256;
     72	case WLAN_CIPHER_SUITE_GCMP:
     73		return HAL_ENCRYPT_TYPE_GCMP_128;
     74	case WLAN_CIPHER_SUITE_GCMP_256:
     75		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
     76	default:
     77		return HAL_ENCRYPT_TYPE_OPEN;
     78	}
     79}
     80
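        /* Data tx path: pick a TCL ring from the current CPU, allocate an
         * IDR slot so the completion can find the skb again, fill in the
         * hal_tx_info, DMA-map the buffer and post a TCL command
         * descriptor to hardware. Falls back to the remaining TCL rings
         * when the selected one is full.
         */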
     81int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
     82		 struct ath11k_sta *arsta, struct sk_buff *skb)
     83{
     84	struct ath11k_base *ab = ar->ab;
     85	struct ath11k_dp *dp = &ab->dp;
     86	struct hal_tx_info ti = {0};
     87	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
     88	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
     89	struct hal_srng *tcl_ring;
     90	struct ieee80211_hdr *hdr = (void *)skb->data;
     91	struct dp_tx_ring *tx_ring;
     92	void *hal_tcl_desc;
     93	u8 pool_id;
     94	u8 hal_ring_id;
     95	int ret;
     96	u8 ring_selector = 0, ring_map = 0;
     97	bool tcl_ring_retry;
     98
     99	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
    100		return -ESHUTDOWN;
    101
    102	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
    103		     !ieee80211_is_data(hdr->frame_control)))
    104		return -ENOTSUPP;
    105
    106	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
    107
    108	/* Let the default ring selection be based on current processor
     109	 * number, where one of the 3 tcl rings is selected based on
    110	 * the smp_processor_id(). In case that ring
    111	 * is full/busy, we resort to other available rings.
    112	 * If all rings are full, we drop the packet.
    113	 * //TODO Add throttling logic when all rings are full
    114	 */
    115	ring_selector = smp_processor_id();
    116
    117tcl_ring_sel:
    118	tcl_ring_retry = false;
    119
    120	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
    121
    122	ring_map |= BIT(ti.ring_id);
    123
    124	tx_ring = &dp->tx_ring[ti.ring_id];
    125
    126	spin_lock_bh(&tx_ring->tx_idr_lock);
    127	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
    128			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
    129	spin_unlock_bh(&tx_ring->tx_idr_lock);
    130
    131	if (unlikely(ret < 0)) {
    132		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
    133			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
    134			return -ENOSPC;
    135		}
    136
    137		/* Check if the next ring is available */
    138		ring_selector++;
    139		goto tcl_ring_sel;
    140	}
    141
    142	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
    143		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
    144		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
    145	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
    146
    147	if (ieee80211_has_a4(hdr->frame_control) &&
    148	    is_multicast_ether_addr(hdr->addr3) && arsta &&
    149	    arsta->use_4addr_set) {
    150		ti.meta_data_flags = arsta->tcl_metadata;
    151		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
    152	} else {
    153		ti.meta_data_flags = arvif->tcl_metadata;
    154	}
    155
    156	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
    157		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
    158			ti.encrypt_type =
    159				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
    160
    161			if (ieee80211_has_protected(hdr->frame_control))
    162				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
    163		} else {
    164			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
    165		}
    166	}
    167
    168	ti.addr_search_flags = arvif->hal_addr_search_flags;
    169	ti.search_type = arvif->search_type;
    170	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
    171	ti.pkt_offset = 0;
    172	ti.lmac_id = ar->lmac_id;
    173	ti.bss_ast_hash = arvif->ast_hash;
    174	ti.bss_ast_idx = arvif->ast_idx;
    175	ti.dscp_tid_tbl_idx = 0;
    176
    177	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
    178		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
    179		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
    180			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
    181			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
    182			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
    183			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
    184	}
    185
    186	if (ieee80211_vif_is_mesh(arvif->vif))
    187		ti.enable_mesh = true;
    188
    189	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
    190
    191	ti.tid = ath11k_dp_tx_get_tid(skb);
    192
    193	switch (ti.encap_type) {
    194	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
    195		ath11k_dp_tx_encap_nwifi(skb);
    196		break;
    197	case HAL_TCL_ENCAP_TYPE_RAW:
    198		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
    199			ret = -EINVAL;
    200			goto fail_remove_idr;
    201		}
    202		break;
    203	case HAL_TCL_ENCAP_TYPE_ETHERNET:
    204		/* no need to encap */
    205		break;
    206	case HAL_TCL_ENCAP_TYPE_802_3:
    207	default:
    208		/* TODO: Take care of other encap modes as well */
    209		ret = -EINVAL;
    210		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
    211		goto fail_remove_idr;
    212	}
    213
    214	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
    215	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
    216		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
    217		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
    218		ret = -ENOMEM;
    219		goto fail_remove_idr;
    220	}
    221
    222	ti.data_len = skb->len;
    223	skb_cb->paddr = ti.paddr;
    224	skb_cb->vif = arvif->vif;
    225	skb_cb->ar = ar;
    226
    227	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
    228	tcl_ring = &ab->hal.srng_list[hal_ring_id];
    229
    230	spin_lock_bh(&tcl_ring->lock);
    231
    232	ath11k_hal_srng_access_begin(ab, tcl_ring);
    233
    234	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
    235	if (unlikely(!hal_tcl_desc)) {
    236		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
    237		 * desc because the desc is directly enqueued onto hw queue.
    238		 */
    239		ath11k_hal_srng_access_end(ab, tcl_ring);
    240		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
    241		spin_unlock_bh(&tcl_ring->lock);
    242		ret = -ENOMEM;
    243
     244		/* Checking for available tcl descriptors in another ring now,
     245		 * on failure due to a full tcl ring, is better than checking
     246		 * this ring earlier for each pkt tx.
    247		 * Restart ring selection if some rings are not checked yet.
    248		 */
    249		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
    250		    ab->hw_params.max_tx_ring > 1) {
    251			tcl_ring_retry = true;
    252			ring_selector++;
    253		}
    254
    255		goto fail_unmap_dma;
    256	}
    257
    258	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
    259					 sizeof(struct hal_tlv_hdr), &ti);
    260
    261	ath11k_hal_srng_access_end(ab, tcl_ring);
    262
    263	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
    264
    265	spin_unlock_bh(&tcl_ring->lock);
    266
    267	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
    268			skb->data, skb->len);
    269
    270	atomic_inc(&ar->dp.num_tx_pending);
    271
    272	return 0;
    273
    274fail_unmap_dma:
    275	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
    276
    277fail_remove_idr:
    278	spin_lock_bh(&tx_ring->tx_idr_lock);
    279	idr_remove(&tx_ring->txbuf_idr,
    280		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
    281	spin_unlock_bh(&tx_ring->tx_idr_lock);
    282
    283	if (tcl_ring_retry)
    284		goto tcl_ring_sel;
    285
    286	return ret;
    287}
    288
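        /* Free a tx buffer without reporting status to mac80211: drop the
         * IDR entry, unmap and free the skb, and wake the tx-empty waitq
         * when the pending tx count drops to zero.
         */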
    289static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
    290				    int msdu_id,
    291				    struct dp_tx_ring *tx_ring)
    292{
    293	struct ath11k *ar;
    294	struct sk_buff *msdu;
    295	struct ath11k_skb_cb *skb_cb;
    296
    297	spin_lock(&tx_ring->tx_idr_lock);
    298	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
    299	spin_unlock(&tx_ring->tx_idr_lock);
    300
    301	if (unlikely(!msdu)) {
    302		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
    303			    msdu_id);
    304		return;
    305	}
    306
    307	skb_cb = ATH11K_SKB_CB(msdu);
    308
    309	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
    310	dev_kfree_skb_any(msdu);
    311
    312	ar = ab->pdevs[mac_id].ar;
    313	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
    314		wake_up(&ar->dp.tx_empty_waitq);
    315}
    316
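        /* Complete an msdu whose status was delivered by firmware over
         * HTT: unmap the buffer, fill in the ACK state and ack signal,
         * and hand the frame back to mac80211.
         */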
    317static void
    318ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
    319				 struct dp_tx_ring *tx_ring,
    320				 struct ath11k_dp_htt_wbm_tx_status *ts)
    321{
    322	struct sk_buff *msdu;
    323	struct ieee80211_tx_info *info;
    324	struct ath11k_skb_cb *skb_cb;
    325	struct ath11k *ar;
    326
    327	spin_lock(&tx_ring->tx_idr_lock);
    328	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
    329	spin_unlock(&tx_ring->tx_idr_lock);
    330
    331	if (unlikely(!msdu)) {
    332		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
    333			    ts->msdu_id);
    334		return;
    335	}
    336
    337	skb_cb = ATH11K_SKB_CB(msdu);
    338	info = IEEE80211_SKB_CB(msdu);
    339
    340	ar = skb_cb->ar;
    341
    342	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
    343		wake_up(&ar->dp.tx_empty_waitq);
    344
    345	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
    346
    347	memset(&info->status, 0, sizeof(info->status));
    348
    349	if (ts->acked) {
    350		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
    351			info->flags |= IEEE80211_TX_STAT_ACK;
    352			info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
    353						  ts->ack_rssi;
    354			info->status.flags |=
    355				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
    356		} else {
    357			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
    358		}
    359	}
    360
    361	ieee80211_tx_status(ar->hw, msdu);
    362}
    363
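        /* Dispatch an HTT WBM completion by status: OK/DROP/TTL complete
         * the buffer with its ack state, REINJ/INSPECT just free it, and
         * MEC_NOTIFY only matters when WDS offload is used.
         */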
    364static void
    365ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
    366				     void *desc, u8 mac_id,
    367				     u32 msdu_id, struct dp_tx_ring *tx_ring)
    368{
    369	struct htt_tx_wbm_completion *status_desc;
    370	struct ath11k_dp_htt_wbm_tx_status ts = {0};
    371	enum hal_wbm_htt_tx_comp_status wbm_status;
    372
    373	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
    374
    375	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
    376			       status_desc->info0);
    377	switch (wbm_status) {
    378	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
    379	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
    380	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
    381		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
    382		ts.msdu_id = msdu_id;
    383		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
    384					status_desc->info1);
    385		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
    386		break;
    387	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
    388	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
    389		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
    390		break;
    391	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
    392		/* This event is to be handled only when the driver decides to
    393		 * use WDS offload functionality.
    394		 */
    395		break;
    396	default:
    397		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
    398		break;
    399	}
    400}
    401
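        /* Accumulate retry and failure counters from a tx status into the
         * pdev's cached per-peer stats.
         */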
    402static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
    403					  struct sk_buff *msdu,
    404					  struct hal_tx_status *ts)
    405{
    406	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
    407
    408	if (ts->try_cnt > 1) {
    409		peer_stats->retry_pkts += ts->try_cnt - 1;
    410		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
    411
    412		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
    413			peer_stats->failed_pkts += 1;
    414			peer_stats->failed_bytes += msdu->len;
    415		}
    416	}
    417}
    418
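        /* Update the station's tx rate info (nss, mcs, gi, bandwidth, RU
         * allocation) from the rate stats of a tx completion and, when
         * extended tx stats are enabled, feed them into debugfs.
         */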
    419void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
    420{
    421	struct ath11k_base *ab = ar->ab;
    422	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
    423	enum hal_tx_rate_stats_pkt_type pkt_type;
    424	enum hal_tx_rate_stats_sgi sgi;
    425	enum hal_tx_rate_stats_bw bw;
    426	struct ath11k_peer *peer;
    427	struct ath11k_sta *arsta;
    428	struct ieee80211_sta *sta;
    429	u16 rate, ru_tones;
    430	u8 mcs, rate_idx = 0, ofdma;
    431	int ret;
    432
    433	spin_lock_bh(&ab->base_lock);
    434	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
    435	if (!peer || !peer->sta) {
    436		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
    437			   "failed to find the peer by id %u\n", ts->peer_id);
    438		goto err_out;
    439	}
    440
    441	sta = peer->sta;
    442	arsta = (struct ath11k_sta *)sta->drv_priv;
    443
    444	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
    445	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
    446			     ts->rate_stats);
    447	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
    448			ts->rate_stats);
    449	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
    450			ts->rate_stats);
    451	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
    452	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
    453	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
    454
     455	/* Prefer the real NSS value in arsta->last_txrate.nss; if it is
     456	 * invalid, fall back to the NSS value negotiated during assoc.
     457	 */
    458	if (arsta->last_txrate.nss)
    459		arsta->txrate.nss = arsta->last_txrate.nss;
    460	else
    461		arsta->txrate.nss = arsta->peer_nss;
    462
    463	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
    464	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
    465		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
    466							    pkt_type,
    467							    &rate_idx,
    468							    &rate);
    469		if (ret < 0)
    470			goto err_out;
    471		arsta->txrate.legacy = rate;
    472	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
    473		if (mcs > 7) {
    474			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
    475			goto err_out;
    476		}
    477
    478		if (arsta->txrate.nss != 0)
    479			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
    480		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
    481		if (sgi)
    482			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
    483	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
    484		if (mcs > 9) {
    485			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
    486			goto err_out;
    487		}
    488
    489		arsta->txrate.mcs = mcs;
    490		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
    491		if (sgi)
    492			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
    493	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
    494		if (mcs > 11) {
    495			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
    496			goto err_out;
    497		}
    498
    499		arsta->txrate.mcs = mcs;
    500		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
    501		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
    502	}
    503
    504	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
    505	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
    506		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
    507		arsta->txrate.he_ru_alloc =
    508			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
    509	}
    510
    511	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
    512		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
    513
    514err_out:
    515	spin_unlock_bh(&ab->base_lock);
    516}
    517
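        /* Report a TQM tx completion to mac80211: unmap the msdu,
         * translate the release reason into ACK/NOACK flags, update the
         * cached per-peer stats and send the status with the last known
         * tx rate.
         */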
    518static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
    519				       struct sk_buff *msdu,
    520				       struct hal_tx_status *ts)
    521{
    522	struct ieee80211_tx_status status = { 0 };
    523	struct ieee80211_rate_status status_rate = { 0 };
    524	struct ath11k_base *ab = ar->ab;
    525	struct ieee80211_tx_info *info;
    526	struct ath11k_skb_cb *skb_cb;
    527	struct ath11k_peer *peer;
    528	struct ath11k_sta *arsta;
    529	struct rate_info rate;
    530
    531	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
    532		/* Must not happen */
    533		return;
    534	}
    535
    536	skb_cb = ATH11K_SKB_CB(msdu);
    537
    538	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
    539
    540	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
    541		dev_kfree_skb_any(msdu);
    542		return;
    543	}
    544
    545	if (unlikely(!skb_cb->vif)) {
    546		dev_kfree_skb_any(msdu);
    547		return;
    548	}
    549
    550	info = IEEE80211_SKB_CB(msdu);
    551	memset(&info->status, 0, sizeof(info->status));
    552
     553	/* skip tx rate update from ieee80211_status */
    554	info->status.rates[0].idx = -1;
    555
    556	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
    557	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
    558		info->flags |= IEEE80211_TX_STAT_ACK;
    559		info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
    560					  ts->ack_rssi;
    561		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
    562	}
    563
    564	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
    565	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
    566		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
    567
    568	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
    569	    ab->hw_params.single_pdev_only) {
    570		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
    571			if (ar->last_ppdu_id == 0) {
    572				ar->last_ppdu_id = ts->ppdu_id;
    573			} else if (ar->last_ppdu_id == ts->ppdu_id ||
    574				   ar->cached_ppdu_id == ar->last_ppdu_id) {
    575				ar->cached_ppdu_id = ar->last_ppdu_id;
    576				ar->cached_stats.is_ampdu = true;
    577				ath11k_dp_tx_update_txcompl(ar, ts);
    578				memset(&ar->cached_stats, 0,
    579				       sizeof(struct ath11k_per_peer_tx_stats));
    580			} else {
    581				ar->cached_stats.is_ampdu = false;
    582				ath11k_dp_tx_update_txcompl(ar, ts);
    583				memset(&ar->cached_stats, 0,
    584				       sizeof(struct ath11k_per_peer_tx_stats));
    585			}
    586			ar->last_ppdu_id = ts->ppdu_id;
    587		}
    588
    589		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
    590	}
    591
    592	spin_lock_bh(&ab->base_lock);
    593	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
    594	if (!peer || !peer->sta) {
    595		ath11k_dbg(ab, ATH11K_DBG_DATA,
    596			   "dp_tx: failed to find the peer with peer_id %d\n",
    597			    ts->peer_id);
    598		spin_unlock_bh(&ab->base_lock);
    599		dev_kfree_skb_any(msdu);
    600		return;
    601	}
    602	arsta = (struct ath11k_sta *)peer->sta->drv_priv;
    603	status.sta = peer->sta;
    604	status.skb = msdu;
    605	status.info = info;
    606	rate = arsta->last_txrate;
    607
    608	status_rate.rate_idx = rate;
    609	status_rate.try_count = 1;
    610
    611	status.rates = &status_rate;
    612	status.n_rates = 1;
    613
    614	spin_unlock_bh(&ab->base_lock);
    615
    616	ieee80211_tx_status_ext(ar->hw, &status);
    617}
    618
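        /* Decode a WBM release descriptor into hal_tx_status. Only TQM
         * releases are parsed fully: release reason, ppdu id, transmit
         * count, ack RSSI, peer/tid and optional rate stats.
         */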
    619static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
    620					     struct hal_wbm_release_ring *desc,
    621					     struct hal_tx_status *ts)
    622{
    623	ts->buf_rel_source =
    624		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
    625	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
    626		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
    627		return;
    628
    629	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
    630		return;
    631
    632	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
    633			       desc->info0);
    634	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
    635				desc->info1);
    636	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
    637				desc->info1);
    638	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
    639				 desc->info2);
    640	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
    641		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
    642	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
    643	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
    644	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
    645		ts->rate_stats = desc->rate_stats.info0;
    646	else
    647		ts->rate_stats = 0;
    648}
    649
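        /* Tx completion bottom half: drain the WBM status ring into the
         * per-ring tx_status buffer under the srng lock, then process
         * each entry, routing firmware-sourced completions to the HTT
         * handler and TQM ones to ath11k_dp_tx_complete_msdu().
         */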
    650void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
    651{
    652	struct ath11k *ar;
    653	struct ath11k_dp *dp = &ab->dp;
    654	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
    655	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
    656	struct sk_buff *msdu;
    657	struct hal_tx_status ts = { 0 };
    658	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
    659	u32 *desc;
    660	u32 msdu_id;
    661	u8 mac_id;
    662
    663	spin_lock_bh(&status_ring->lock);
    664
    665	ath11k_hal_srng_access_begin(ab, status_ring);
    666
    667	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
    668		tx_ring->tx_status_tail) &&
    669	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
    670		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
    671		       desc, sizeof(struct hal_wbm_release_ring));
    672		tx_ring->tx_status_head =
    673			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
    674	}
    675
    676	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
    677		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
    678		      tx_ring->tx_status_tail))) {
    679		/* TODO: Process pending tx_status messages when kfifo_is_full() */
    680		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
    681	}
    682
    683	ath11k_hal_srng_access_end(ab, status_ring);
    684
    685	spin_unlock_bh(&status_ring->lock);
    686
    687	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
    688		struct hal_wbm_release_ring *tx_status;
    689		u32 desc_id;
    690
    691		tx_ring->tx_status_tail =
    692			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
    693		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
    694		ath11k_dp_tx_status_parse(ab, tx_status, &ts);
    695
    696		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
    697				    tx_status->buf_addr_info.info1);
    698		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
    699		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
    700
    701		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
    702			ath11k_dp_tx_process_htt_tx_complete(ab,
    703							     (void *)tx_status,
    704							     mac_id, msdu_id,
    705							     tx_ring);
    706			continue;
    707		}
    708
    709		spin_lock(&tx_ring->tx_idr_lock);
    710		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
    711		if (unlikely(!msdu)) {
    712			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
    713				    msdu_id);
    714			spin_unlock(&tx_ring->tx_idr_lock);
    715			continue;
    716		}
    717
    718		spin_unlock(&tx_ring->tx_idr_lock);
    719
    720		ar = ab->pdevs[mac_id].ar;
    721
    722		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
    723			wake_up(&ar->dp.tx_empty_waitq);
    724
    725		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
    726	}
    727}
    728
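        /* Post a REO command for a rx tid to the REO command ring. If a
         * callback is given, the command is tracked on dp->reo_cmd_list
         * so the later status event can invoke it.
         */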
    729int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
    730			      enum hal_reo_cmd_type type,
    731			      struct ath11k_hal_reo_cmd *cmd,
    732			      void (*cb)(struct ath11k_dp *, void *,
    733					 enum hal_reo_cmd_status))
    734{
    735	struct ath11k_dp *dp = &ab->dp;
    736	struct dp_reo_cmd *dp_cmd;
    737	struct hal_srng *cmd_ring;
    738	int cmd_num;
    739
    740	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
    741		return -ESHUTDOWN;
    742
    743	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
    744	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
    745
     746	/* cmd_num should start from 1; on failure, return the error code */
    747	if (cmd_num < 0)
    748		return cmd_num;
    749
    750	/* reo cmd ring descriptors has cmd_num starting from 1 */
    751	if (cmd_num == 0)
    752		return -EINVAL;
    753
    754	if (!cb)
    755		return 0;
    756
    757	/* Can this be optimized so that we keep the pending command list only
     758	 * for tid delete command to free up the resource on the command status
    759	 * indication?
    760	 */
    761	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
    762
    763	if (!dp_cmd)
    764		return -ENOMEM;
    765
    766	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
    767	dp_cmd->cmd_num = cmd_num;
    768	dp_cmd->handler = cb;
    769
    770	spin_lock_bh(&dp->reo_cmd_lock);
    771	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
    772	spin_unlock_bh(&dp->reo_cmd_lock);
    773
    774	return 0;
    775}
    776
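        /* Translate a HAL ring id/type pair into the HTT ring id and ring
         * type that firmware expects in the srng setup message.
         */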
    777static int
    778ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
    779			      int mac_id, u32 ring_id,
    780			      enum hal_ring_type ring_type,
    781			      enum htt_srng_ring_type *htt_ring_type,
    782			      enum htt_srng_ring_id *htt_ring_id)
    783{
    784	int lmac_ring_id_offset = 0;
    785	int ret = 0;
    786
    787	switch (ring_type) {
    788	case HAL_RXDMA_BUF:
    789		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
    790
     791		/* for QCA6390, the host supplies rx buffers to fw, and fw
     792		 * fills the rxbuf ring for each rxdma
    793		 */
    794		if (!ab->hw_params.rx_mac_buf_ring) {
    795			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
    796					  lmac_ring_id_offset) ||
    797				ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
    798					lmac_ring_id_offset))) {
    799				ret = -EINVAL;
    800			}
    801			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
    802			*htt_ring_type = HTT_SW_TO_HW_RING;
    803		} else {
    804			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
    805				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
    806				*htt_ring_type = HTT_SW_TO_SW_RING;
    807			} else {
    808				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
    809				*htt_ring_type = HTT_SW_TO_HW_RING;
    810			}
    811		}
    812		break;
    813	case HAL_RXDMA_DST:
    814		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
    815		*htt_ring_type = HTT_HW_TO_SW_RING;
    816		break;
    817	case HAL_RXDMA_MONITOR_BUF:
    818		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
    819		*htt_ring_type = HTT_SW_TO_HW_RING;
    820		break;
    821	case HAL_RXDMA_MONITOR_STATUS:
    822		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
    823		*htt_ring_type = HTT_SW_TO_HW_RING;
    824		break;
    825	case HAL_RXDMA_MONITOR_DST:
    826		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
    827		*htt_ring_type = HTT_HW_TO_SW_RING;
    828		break;
    829	case HAL_RXDMA_MONITOR_DESC:
    830		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
    831		*htt_ring_type = HTT_SW_TO_HW_RING;
    832		break;
    833	default:
     834		ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
    835		ret = -EINVAL;
    836	}
    837	return ret;
    838}
    839
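        /* Describe a host SRNG to firmware via an HTT sring setup
         * message: base/head/tail pointer addresses, entry and ring
         * sizes, byte-swap flags, MSI and interrupt moderation
         * parameters.
         */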
    840int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
    841				int mac_id, enum hal_ring_type ring_type)
    842{
    843	struct htt_srng_setup_cmd *cmd;
    844	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
    845	struct hal_srng_params params;
    846	struct sk_buff *skb;
    847	u32 ring_entry_sz;
    848	int len = sizeof(*cmd);
    849	dma_addr_t hp_addr, tp_addr;
    850	enum htt_srng_ring_type htt_ring_type;
    851	enum htt_srng_ring_id htt_ring_id;
    852	int ret;
    853
    854	skb = ath11k_htc_alloc_skb(ab, len);
    855	if (!skb)
    856		return -ENOMEM;
    857
    858	memset(&params, 0, sizeof(params));
    859	ath11k_hal_srng_get_params(ab, srng, &params);
    860
    861	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
    862	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
    863
    864	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
    865					    ring_type, &htt_ring_type,
    866					    &htt_ring_id);
    867	if (ret)
    868		goto err_free;
    869
    870	skb_put(skb, len);
    871	cmd = (struct htt_srng_setup_cmd *)skb->data;
    872	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
    873				HTT_H2T_MSG_TYPE_SRING_SETUP);
    874	if (htt_ring_type == HTT_SW_TO_HW_RING ||
    875	    htt_ring_type == HTT_HW_TO_SW_RING)
    876		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
    877					 DP_SW2HW_MACID(mac_id));
    878	else
    879		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
    880					 mac_id);
    881	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
    882				 htt_ring_type);
    883	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
    884
    885	cmd->ring_base_addr_lo = params.ring_base_paddr &
    886				 HAL_ADDR_LSB_REG_MASK;
    887
    888	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
    889				 HAL_ADDR_MSB_REG_SHIFT;
    890
    891	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
    892	if (ret < 0)
    893		goto err_free;
    894
    895	ring_entry_sz = ret;
    896
    897	ring_entry_sz >>= 2;
    898	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
    899				ring_entry_sz);
    900	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
    901				 params.num_entries * ring_entry_sz);
    902	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
    903				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
    904	cmd->info1 |= FIELD_PREP(
    905			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
    906			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
    907	cmd->info1 |= FIELD_PREP(
    908			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
    909			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
    910	if (htt_ring_type == HTT_SW_TO_HW_RING)
    911		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
    912
    913	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
    914	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
    915					      HAL_ADDR_MSB_REG_SHIFT;
    916
    917	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
    918	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
    919					      HAL_ADDR_MSB_REG_SHIFT;
    920
    921	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
    922	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
    923	cmd->msi_data = params.msi_data;
    924
    925	cmd->intr_info = FIELD_PREP(
    926			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
    927			params.intr_batch_cntr_thres_entries * ring_entry_sz);
    928	cmd->intr_info |= FIELD_PREP(
    929			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
    930			params.intr_timer_thres_us >> 3);
    931
    932	cmd->info2 = 0;
    933	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
    934		cmd->info2 = FIELD_PREP(
    935				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
    936				params.low_threshold);
    937	}
    938
     939	ath11k_dbg(ab, ATH11K_DBG_HAL,
    940		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
    941		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
    942		   cmd->msi_data);
    943
     944	ath11k_dbg(ab, ATH11K_DBG_HAL,
    945		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
    946		   ring_id, ring_type, cmd->intr_info, cmd->info2);
    947
    948	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
    949	if (ret)
    950		goto err_free;
    951
    952	return 0;
    953
    954err_free:
    955	dev_kfree_skb_any(skb);
    956
    957	return ret;
    958}
    959
    960#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
    961
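        /* Request the HTT target version and wait up to 3 seconds for the
         * response; fail if the reported major version is unsupported.
         */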
    962int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
    963{
    964	struct ath11k_dp *dp = &ab->dp;
    965	struct sk_buff *skb;
    966	struct htt_ver_req_cmd *cmd;
    967	int len = sizeof(*cmd);
    968	int ret;
    969
    970	init_completion(&dp->htt_tgt_version_received);
    971
    972	skb = ath11k_htc_alloc_skb(ab, len);
    973	if (!skb)
    974		return -ENOMEM;
    975
    976	skb_put(skb, len);
    977	cmd = (struct htt_ver_req_cmd *)skb->data;
    978	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
    979				       HTT_H2T_MSG_TYPE_VERSION_REQ);
    980
    981	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
    982	if (ret) {
    983		dev_kfree_skb_any(skb);
    984		return ret;
    985	}
    986
    987	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
    988					  HTT_TARGET_VERSION_TIMEOUT_HZ);
    989	if (ret == 0) {
    990		ath11k_warn(ab, "htt target version request timed out\n");
    991		return -ETIMEDOUT;
    992	}
    993
    994	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
     995		ath11k_err(ab, "unsupported htt major version %d, supported version is %d\n",
    996			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
    997		return -ENOTSUPP;
    998	}
    999
   1000	return 0;
   1001}
   1002
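        /* Configure which PPDU stats TLVs firmware delivers, sending one
         * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG message per rxdma of the pdev.
         */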
   1003int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
   1004{
   1005	struct ath11k_base *ab = ar->ab;
   1006	struct ath11k_dp *dp = &ab->dp;
   1007	struct sk_buff *skb;
   1008	struct htt_ppdu_stats_cfg_cmd *cmd;
   1009	int len = sizeof(*cmd);
   1010	u8 pdev_mask;
   1011	int ret;
   1012	int i;
   1013
   1014	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
   1015		skb = ath11k_htc_alloc_skb(ab, len);
   1016		if (!skb)
   1017			return -ENOMEM;
   1018
   1019		skb_put(skb, len);
   1020		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
   1021		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
   1022				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
   1023
   1024		pdev_mask = 1 << (ar->pdev_idx + i);
   1025		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
   1026		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
   1027
   1028		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
   1029		if (ret) {
   1030			dev_kfree_skb_any(skb);
   1031			return ret;
   1032		}
   1033	}
   1034
   1035	return 0;
   1036}
   1037
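        /* Program firmware's rx ring selection config for a ring: buffer
         * size, byte-swap flags and the packet-type/TLV filters to apply.
         */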
   1038int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
   1039				     int mac_id, enum hal_ring_type ring_type,
   1040				     int rx_buf_size,
   1041				     struct htt_rx_ring_tlv_filter *tlv_filter)
   1042{
   1043	struct htt_rx_ring_selection_cfg_cmd *cmd;
   1044	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
   1045	struct hal_srng_params params;
   1046	struct sk_buff *skb;
   1047	int len = sizeof(*cmd);
   1048	enum htt_srng_ring_type htt_ring_type;
   1049	enum htt_srng_ring_id htt_ring_id;
   1050	int ret;
   1051
   1052	skb = ath11k_htc_alloc_skb(ab, len);
   1053	if (!skb)
   1054		return -ENOMEM;
   1055
   1056	memset(&params, 0, sizeof(params));
   1057	ath11k_hal_srng_get_params(ab, srng, &params);
   1058
   1059	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
   1060					    ring_type, &htt_ring_type,
   1061					    &htt_ring_id);
   1062	if (ret)
   1063		goto err_free;
   1064
   1065	skb_put(skb, len);
   1066	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
   1067	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
   1068				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
   1069	if (htt_ring_type == HTT_SW_TO_HW_RING ||
   1070	    htt_ring_type == HTT_HW_TO_SW_RING)
   1071		cmd->info0 |=
   1072			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
   1073				   DP_SW2HW_MACID(mac_id));
   1074	else
   1075		cmd->info0 |=
   1076			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
   1077				   mac_id);
   1078	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
   1079				 htt_ring_id);
   1080	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
   1081				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
   1082	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
   1083				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
   1084
   1085	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
   1086				rx_buf_size);
   1087	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
   1088	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
   1089	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
   1090	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
   1091	cmd->rx_filter_tlv = tlv_filter->rx_filter;
   1092
   1093	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
   1094	if (ret)
   1095		goto err_free;
   1096
   1097	return 0;
   1098
   1099err_free:
   1100	dev_kfree_skb_any(skb);
   1101
   1102	return ret;
   1103}
   1104
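        /* Request extended target stats of the given type over HTT; the
         * cfg params select the stats and the cookie tags the request.
         */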
   1105int
   1106ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
   1107				   struct htt_ext_stats_cfg_params *cfg_params,
   1108				   u64 cookie)
   1109{
   1110	struct ath11k_base *ab = ar->ab;
   1111	struct ath11k_dp *dp = &ab->dp;
   1112	struct sk_buff *skb;
   1113	struct htt_ext_stats_cfg_cmd *cmd;
   1114	u32 pdev_id;
   1115	int len = sizeof(*cmd);
   1116	int ret;
   1117
   1118	skb = ath11k_htc_alloc_skb(ab, len);
   1119	if (!skb)
   1120		return -ENOMEM;
   1121
   1122	skb_put(skb, len);
   1123
   1124	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
   1125	memset(cmd, 0, sizeof(*cmd));
   1126	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
   1127
   1128	if (ab->hw_params.single_pdev_only)
   1129		pdev_id = ath11k_mac_get_target_pdev_id(ar);
   1130	else
   1131		pdev_id = ar->pdev->pdev_id;
   1132
   1133	cmd->hdr.pdev_mask = 1 << pdev_id;
   1134
   1135	cmd->hdr.stats_type = type;
   1136	cmd->cfg_param0 = cfg_params->cfg0;
   1137	cmd->cfg_param1 = cfg_params->cfg1;
   1138	cmd->cfg_param2 = cfg_params->cfg2;
   1139	cmd->cfg_param3 = cfg_params->cfg3;
   1140	cmd->cookie_lsb = lower_32_bits(cookie);
   1141	cmd->cookie_msb = upper_32_bits(cookie);
   1142
   1143	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
   1144	if (ret) {
   1145		ath11k_warn(ab, "failed to send htt type stats request: %d",
   1146			    ret);
   1147		dev_kfree_skb_any(skb);
   1148		return ret;
   1149	}
   1150
   1151	return 0;
   1152}
   1153
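        /* Enable (or reset, when requested) monitor mode rx filtering:
         * configure the monitor buffer and status rings, or the per-mac
         * rx buf rings when rxdma1 is not available, with the matching
         * TLV filters.
         */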
   1154int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
   1155{
   1156	struct ath11k_pdev_dp *dp = &ar->dp;
   1157	struct ath11k_base *ab = ar->ab;
   1158	struct htt_rx_ring_tlv_filter tlv_filter = {0};
   1159	int ret = 0, ring_id = 0, i;
   1160
   1161	if (ab->hw_params.full_monitor_mode) {
   1162		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
   1163							 dp->mac_id, !reset);
   1164		if (ret < 0) {
   1165			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
   1166			return ret;
   1167		}
   1168	}
   1169
   1170	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
   1171
   1172	if (!reset) {
   1173		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
   1174		tlv_filter.pkt_filter_flags0 =
   1175					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
   1176					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
   1177		tlv_filter.pkt_filter_flags1 =
   1178					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
   1179					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
   1180		tlv_filter.pkt_filter_flags2 =
   1181					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
   1182					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
   1183		tlv_filter.pkt_filter_flags3 =
   1184					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
   1185					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
   1186					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
   1187					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
   1188	}
   1189
   1190	if (ab->hw_params.rxdma1_enable) {
   1191		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
   1192						       HAL_RXDMA_MONITOR_BUF,
   1193						       DP_RXDMA_REFILL_RING_SIZE,
   1194						       &tlv_filter);
   1195	} else if (!reset) {
   1196		/* set in monitor mode only */
   1197		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
   1198			ring_id = dp->rx_mac_buf_ring[i].ring_id;
   1199			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
   1200							       dp->mac_id + i,
   1201							       HAL_RXDMA_BUF,
   1202							       1024,
   1203							       &tlv_filter);
   1204		}
   1205	}
   1206
   1207	if (ret)
   1208		return ret;
   1209
   1210	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
   1211		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
   1212		if (!reset) {
   1213			tlv_filter.rx_filter =
   1214					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
   1215		} else {
   1216			tlv_filter = ath11k_mac_mon_status_filter_default;
   1217
   1218			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
   1219				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
   1220		}
   1221
   1222		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
   1223						       dp->mac_id + i,
   1224						       HAL_RXDMA_MONITOR_STATUS,
   1225						       DP_RXDMA_REFILL_RING_SIZE,
   1226						       &tlv_filter);
   1227	}
   1228
   1229	if (!ar->ab->hw_params.rxdma1_enable)
   1230		mod_timer(&ar->ab->mon_reap_timer, jiffies +
   1231			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
   1232
   1233	return ret;
   1234}
   1235
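        /* Ask firmware to enable full monitor mode on a pdev, releasing
         * to the SW monitor ring and, when config is set, including
         * zero and non-zero MPDUs-end PPDUs.
         */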
   1236int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
   1237				       bool config)
   1238{
   1239	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
   1240	struct sk_buff *skb;
   1241	int ret, len = sizeof(*cmd);
   1242
   1243	skb = ath11k_htc_alloc_skb(ab, len);
   1244	if (!skb)
   1245		return -ENOMEM;
   1246
   1247	skb_put(skb, len);
   1248	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
   1249	memset(cmd, 0, sizeof(*cmd));
   1250	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
   1251				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
   1252
   1253	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
   1254
   1255	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
   1256		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
   1257			      HTT_RX_MON_RING_SW);
   1258	if (config) {
   1259		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
   1260			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
   1261	}
   1262
   1263	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
   1264	if (ret)
   1265		goto err_free;
   1266
   1267	return 0;
   1268
   1269err_free:
   1270	dev_kfree_skb_any(skb);
   1271
   1272	return ret;
   1273}