cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

utils.c (29631B)


      1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
      2/*
      3 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
      4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
      5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
      6 */
      7#include <net/mac80211.h>
      8
      9#include "iwl-debug.h"
     10#include "iwl-io.h"
     11#include "iwl-prph.h"
     12#include "iwl-csr.h"
     13#include "mvm.h"
     14#include "fw/api/rs.h"
     15#include "fw/img.h"
     16
     17/*
      18 * Will return 0 even if the cmd failed while RFKILL is asserted,
      19 * unless CMD_WANT_SKB is set in cmd->flags.
     20 */
     21int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
     22{
     23	int ret;
     24
     25#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
     26	if (WARN_ON(mvm->d3_test_active))
     27		return -EIO;
     28#endif
     29
     30	/*
     31	 * Synchronous commands from this op-mode must hold
     32	 * the mutex, this ensures we don't try to send two
     33	 * (or more) synchronous commands at a time.
     34	 */
     35	if (!(cmd->flags & CMD_ASYNC))
     36		lockdep_assert_held(&mvm->mutex);
     37
     38	ret = iwl_trans_send_cmd(mvm->trans, cmd);
     39
     40	/*
     41	 * If the caller wants the SKB, then don't hide any problems, the
     42	 * caller might access the response buffer which will be NULL if
     43	 * the command failed.
     44	 */
     45	if (cmd->flags & CMD_WANT_SKB)
     46		return ret;
     47
     48	/*
     49	 * Silently ignore failures if RFKILL is asserted or
      50	 * we are in the suspend/resume process
     51	 */
     52	if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
     53		return 0;
     54	return ret;
     55}
     56
     57int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
     58			 u32 flags, u16 len, const void *data)
     59{
     60	struct iwl_host_cmd cmd = {
     61		.id = id,
     62		.len = { len, },
     63		.data = { data, },
     64		.flags = flags,
     65	};
     66
     67	return iwl_mvm_send_cmd(mvm, &cmd);
     68}
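
/*
 * Illustrative sketch: a minimal caller of the PDU helper above
 * (example_send_echo is a hypothetical helper; ECHO_CMD is a real,
 * payload-less command id from fw/api/commands.h and stands in for any
 * command). flags == 0 selects the synchronous path, which must hold
 * mvm->mutex per the comment in iwl_mvm_send_cmd.
 */
static int example_send_echo(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);	/* synchronous send */

	/* no payload: len 0, data NULL */
	return iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
}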
     69
     70/*
     71 * We assume that the caller set the status to the success value
     72 */
     73int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
     74			    u32 *status)
     75{
     76	struct iwl_rx_packet *pkt;
     77	struct iwl_cmd_response *resp;
     78	int ret, resp_len;
     79
     80	lockdep_assert_held(&mvm->mutex);
     81
     82#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
     83	if (WARN_ON(mvm->d3_test_active))
     84		return -EIO;
     85#endif
     86
     87	/*
     88	 * Only synchronous commands can wait for status,
      89	 * we set CMD_WANT_SKB internally, so the caller must not.
     90	 */
     91	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
     92		      "cmd flags %x", cmd->flags))
     93		return -EINVAL;
     94
     95	cmd->flags |= CMD_WANT_SKB;
     96
     97	ret = iwl_trans_send_cmd(mvm->trans, cmd);
     98	if (ret == -ERFKILL) {
     99		/*
    100		 * The command failed because of RFKILL, don't update
    101		 * the status, leave it as success and return 0.
    102		 */
    103		return 0;
    104	} else if (ret) {
    105		return ret;
    106	}
    107
    108	pkt = cmd->resp_pkt;
    109
    110	resp_len = iwl_rx_packet_payload_len(pkt);
    111	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
    112		ret = -EIO;
    113		goto out_free_resp;
    114	}
    115
    116	resp = (void *)pkt->data;
    117	*status = le32_to_cpu(resp->status);
    118 out_free_resp:
    119	iwl_free_resp(cmd);
    120	return ret;
    121}
    122
    123/*
     124 * We assume that the caller set the status to the success value
    125 */
    126int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
    127				const void *data, u32 *status)
    128{
    129	struct iwl_host_cmd cmd = {
    130		.id = id,
    131		.len = { len, },
    132		.data = { data, },
    133	};
    134
    135	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
    136}
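
/*
 * Illustrative sketch (hypothetical helper, placeholder payload and
 * command id): the intended calling pattern for the *_status helpers.
 * The caller pre-seeds *status with its success value; a non-zero
 * return means a transport failure, while a firmware rejection is
 * reported through status.
 */
static int example_send_checked(struct iwl_mvm *mvm, u32 id,
				const void *payload, u16 len)
{
	u32 status = 0;	/* caller-chosen success value */
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, id, len, payload, &status);
	if (ret)
		return ret;	/* transport-level failure */
	if (status)		/* firmware reported an error */
		return -EIO;

	return 0;
}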
    137
    138int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
    139					  enum nl80211_band band)
    140{
    141	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
    142	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
    143	bool is_LB = band == NL80211_BAND_2GHZ;
    144
    145	if (format == RATE_MCS_LEGACY_OFDM_MSK)
    146		return is_LB ? rate + IWL_FIRST_OFDM_RATE :
    147			rate;
    148
    149	/* CCK is not allowed in HB */
    150	return is_LB ? rate : -1;
    151}
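
/*
 * Worked example: mac80211's legacy rate table lists the four CCK
 * rates first, so in 2.4 GHz an OFDM hardware index is shifted up by
 * IWL_FIRST_OFDM_RATE, while in 5 GHz (OFDM only) it passes through
 * unchanged and CCK yields -1. A caller might handle that error with
 * a fallback (hypothetical helper, illustrative fallback choice):
 */
static int example_hw_idx_to_mac80211(u32 rate_n_flags,
				      enum nl80211_band band)
{
	int idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, band);

	return idx < 0 ? 0 : idx;	/* fall back to the lowest rate */
}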
    152
    153int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
    154					enum nl80211_band band)
    155{
    156	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
    157	int idx;
    158	int band_offset = 0;
    159
    160	/* Legacy rate format, search for match in table */
    161	if (band != NL80211_BAND_2GHZ)
    162		band_offset = IWL_FIRST_OFDM_RATE;
    163	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
    164		if (iwl_fw_rate_idx_to_plcp(idx) == rate)
    165			return idx - band_offset;
    166
    167	return -1;
    168}
    169
    170u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
    171{
    172	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
     173	/* In the new rate format, legacy rates are indexed:
    174		 * 0 - 3 for CCK and 0 - 7 for OFDM.
    175		 */
    176		return (rate_idx >= IWL_FIRST_OFDM_RATE ?
    177			rate_idx - IWL_FIRST_OFDM_RATE :
    178			rate_idx);
    179
    180	return iwl_fw_rate_idx_to_plcp(rate_idx);
    181}
    182
    183u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
    184{
    185	static const u8 mac80211_ac_to_ucode_ac[] = {
    186		AC_VO,
    187		AC_VI,
    188		AC_BE,
    189		AC_BK
    190	};
    191
    192	return mac80211_ac_to_ucode_ac[ac];
    193}
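
/*
 * Worked example: mac80211 numbers the access categories VO=0, VI=1,
 * BE=2, BK=3 (enum ieee80211_ac_numbers), which is the order the table
 * above is indexed in; the table exists because the firmware's AC_*
 * values need not use the same numbering. So
 * iwl_mvm_mac80211_ac_to_ucode_ac(IEEE80211_AC_VI) returns AC_VI,
 * whatever numeric value that carries in the firmware API.
 */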
    194
    195void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
    196{
    197	struct iwl_rx_packet *pkt = rxb_addr(rxb);
    198	struct iwl_error_resp *err_resp = (void *)pkt->data;
    199
    200	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
    201		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
    202	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
    203		le16_to_cpu(err_resp->bad_cmd_seq_num),
    204		le32_to_cpu(err_resp->error_service));
    205	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
    206		le64_to_cpu(err_resp->timestamp));
    207}
    208
    209/*
    210 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
    211 * The parameter should also be a combination of ANT_[ABC].
    212 */
    213u8 first_antenna(u8 mask)
    214{
    215	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
    216	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
    217		return BIT(0);
    218	return BIT(ffs(mask) - 1);
    219}
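
/*
 * Worked example: ffs() is 1-based, so BIT(ffs(mask) - 1) isolates the
 * lowest set bit. With ANT_A == BIT(0), ANT_B == BIT(1) and
 * ANT_C == BIT(2) (iwl-config.h), first_antenna(ANT_B | ANT_C)
 * returns ANT_B, and the WARN_ON_ONCE path turns an empty mask into
 * ANT_A instead of evaluating BIT(ffs(0) - 1) == BIT(-1).
 */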
    220
    221#define MAX_ANT_NUM 2
    222/*
    223 * Toggles between TX antennas to send the probe request on.
    224 * Receives the bitmask of valid TX antennas and the *index* used
    225 * for the last TX, and returns the next valid *index* to use.
     226 * In order to set it in the tx_cmd, the caller must use BIT(idx).
    227 */
    228u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
    229{
    230	u8 ind = last_idx;
    231	int i;
    232
    233	for (i = 0; i < MAX_ANT_NUM; i++) {
    234		ind = (ind + 1) % MAX_ANT_NUM;
    235		if (valid & BIT(ind))
    236			return ind;
    237	}
    238
    239	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
    240	return last_idx;
    241}
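
/*
 * Illustrative sketch (hypothetical helper): toggling the TX antenna
 * between probe requests and building the antenna mask for the rate
 * field, per the BIT(idx) remark above. The shift by RATE_MCS_ANT_POS
 * (fw/api/rs.h) mirrors how the driver places the antenna bits in
 * rate_n_flags.
 */
static u32 example_next_ant_msk(struct iwl_mvm *mvm, u8 *last_idx)
{
	*last_idx = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
					 *last_idx);

	return (u32)BIT(*last_idx) << RATE_MCS_ANT_POS;
}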
    242
    243/**
    244 * iwl_mvm_send_lq_cmd() - Send link quality command
    245 * @mvm: Driver data.
    246 * @lq: Link quality command to send.
    247 *
    248 * The link quality command is sent as the last step of station creation.
     249 * This is the special case in which init is set, and we call a
     250 * callback in that case to clear the state indicating that station
     251 * creation is in progress.
    252 */
    253int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
    254{
    255	struct iwl_host_cmd cmd = {
    256		.id = LQ_CMD,
    257		.len = { sizeof(struct iwl_lq_cmd), },
    258		.flags = CMD_ASYNC,
    259		.data = { lq, },
    260	};
    261
    262	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
    263		    iwl_mvm_has_tlc_offload(mvm)))
    264		return -EINVAL;
    265
    266	return iwl_mvm_send_cmd(mvm, &cmd);
    267}
    268
    269/**
    270 * iwl_mvm_update_smps - Get a request to change the SMPS mode
    271 * @mvm: Driver data.
    272 * @vif: Pointer to the ieee80211_vif structure
     273 * @req_type: The part of the driver that requested the change.
    274 * @smps_request: The request to change the SMPS mode.
    275 *
     276 * Get a request to change the SMPS mode,
    277 * and change it according to all other requests in the driver.
    278 */
    279void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
    280			 enum iwl_mvm_smps_type_request req_type,
    281			 enum ieee80211_smps_mode smps_request)
    282{
    283	struct iwl_mvm_vif *mvmvif;
    284	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
    285	int i;
    286
    287	lockdep_assert_held(&mvm->mutex);
    288
     289	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
    290	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
    291		return;
    292
    293	if (vif->type != NL80211_IFTYPE_STATION)
    294		return;
    295
    296	mvmvif = iwl_mvm_vif_from_mac80211(vif);
    297	mvmvif->smps_requests[req_type] = smps_request;
    298	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
    299		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
    300			smps_mode = IEEE80211_SMPS_STATIC;
    301			break;
    302		}
    303		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
    304			smps_mode = IEEE80211_SMPS_DYNAMIC;
    305	}
    306
    307	ieee80211_request_smps(vif, smps_mode);
    308}
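
/*
 * Illustrative sketch (hypothetical helper): how one of the requesting
 * subsystems, here the BT-coex slot of the smps_requests array, would
 * ask for dynamic SMPS. The function above then merges all outstanding
 * requests, STATIC taking precedence over DYNAMIC over AUTOMATIC.
 */
static void example_request_dynamic_smps(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
			    IEEE80211_SMPS_DYNAMIC);
}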
    309
    310static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
    311				    struct iwl_rx_packet *pkt, void *data)
    312{
    313	WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
    314
    315	return true;
    316}
    317
    318int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
    319{
    320	struct iwl_statistics_cmd scmd = {
    321		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
    322	};
    323
    324	struct iwl_host_cmd cmd = {
    325		.id = STATISTICS_CMD,
    326		.len[0] = sizeof(scmd),
    327		.data[0] = &scmd,
    328	};
    329	int ret;
    330
     331	/* From version 15 of STATISTICS_NOTIFICATION, the reply for
     332	 * STATISTICS_CMD is empty, and the response arrives as a
     333	 * STATISTICS_NOTIFICATION notification
    334	 */
    335	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
    336				    STATISTICS_NOTIFICATION, 0) < 15) {
    337		cmd.flags = CMD_WANT_SKB;
    338
    339		ret = iwl_mvm_send_cmd(mvm, &cmd);
    340		if (ret)
    341			return ret;
    342
    343		iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
    344		iwl_free_resp(&cmd);
    345	} else {
    346		struct iwl_notification_wait stats_wait;
    347		static const u16 stats_complete[] = {
    348			STATISTICS_NOTIFICATION,
    349		};
    350
    351		iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
    352					   stats_complete, ARRAY_SIZE(stats_complete),
    353					   iwl_wait_stats_complete, NULL);
    354
    355		ret = iwl_mvm_send_cmd(mvm, &cmd);
    356		if (ret) {
    357			iwl_remove_notification(&mvm->notif_wait, &stats_wait);
    358			return ret;
    359		}
    360
    361		/* 200ms should be enough for FW to collect data from all
    362		 * LMACs and send STATISTICS_NOTIFICATION to host
    363		 */
    364		ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
    365		if (ret)
    366			return ret;
    367	}
    368
    369	if (clear)
    370		iwl_mvm_accu_radio_stats(mvm);
    371
    372	return 0;
    373}
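
/*
 * Illustrative sketch (hypothetical helper): the usual calling pattern,
 * as also used by iwl_mvm_recalc_tcm below: take mvm->mutex and request
 * statistics with clear == true to reset the firmware counters. On the
 * >= v15 notification API this blocks up to 200 ms (HZ / 5) waiting
 * for STATISTICS_NOTIFICATION.
 */
static int example_refresh_statistics(struct iwl_mvm *mvm)
{
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_request_statistics(mvm, true);
	mutex_unlock(&mvm->mutex);

	return ret;
}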
    374
    375void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
    376{
    377	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
    378	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
    379	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
    380	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
    381}
    382
    383struct iwl_mvm_diversity_iter_data {
    384	struct iwl_mvm_phy_ctxt *ctxt;
    385	bool result;
    386};
    387
    388static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
    389				   struct ieee80211_vif *vif)
    390{
    391	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    392	struct iwl_mvm_diversity_iter_data *data = _data;
    393	int i;
    394
    395	if (mvmvif->phy_ctxt != data->ctxt)
    396		return;
    397
    398	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
    399		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
    400		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
    401			data->result = false;
    402			break;
    403		}
    404	}
    405}
    406
    407bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
    408				  struct iwl_mvm_phy_ctxt *ctxt)
    409{
    410	struct iwl_mvm_diversity_iter_data data = {
    411		.ctxt = ctxt,
    412		.result = true,
    413	};
    414
    415	lockdep_assert_held(&mvm->mutex);
    416
    417	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
    418		return false;
    419
    420	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
    421		return false;
    422
    423	if (mvm->cfg->rx_with_siso_diversity)
    424		return false;
    425
    426	ieee80211_iterate_active_interfaces_atomic(
    427			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    428			iwl_mvm_diversity_iter, &data);
    429
    430	return data.result;
    431}
    432
    433void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
    434				  bool low_latency, u16 mac_id)
    435{
    436	struct iwl_mac_low_latency_cmd cmd = {
    437		.mac_id = cpu_to_le32(mac_id)
    438	};
    439
    440	if (!fw_has_capa(&mvm->fw->ucode_capa,
    441			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
    442		return;
    443
    444	if (low_latency) {
    445		/* currently we don't care about the direction */
    446		cmd.low_latency_rx = 1;
    447		cmd.low_latency_tx = 1;
    448	}
    449
    450	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
    451				 0, sizeof(cmd), &cmd))
    452		IWL_ERR(mvm, "Failed to send low latency command\n");
    453}
    454
    455int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
    456			       bool low_latency,
    457			       enum iwl_mvm_low_latency_cause cause)
    458{
    459	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    460	int res;
    461	bool prev;
    462
    463	lockdep_assert_held(&mvm->mutex);
    464
    465	prev = iwl_mvm_vif_low_latency(mvmvif);
    466	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
    467
    468	low_latency = iwl_mvm_vif_low_latency(mvmvif);
    469
    470	if (low_latency == prev)
    471		return 0;
    472
    473	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
    474
    475	res = iwl_mvm_update_quotas(mvm, false, NULL);
    476	if (res)
    477		return res;
    478
    479	iwl_mvm_bt_coex_vif_change(mvm);
    480
    481	return iwl_mvm_power_update_mac(mvm);
    482}
    483
    484struct iwl_mvm_low_latency_iter {
    485	bool result;
    486	bool result_per_band[NUM_NL80211_BANDS];
    487};
    488
    489static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
    490{
    491	struct iwl_mvm_low_latency_iter *result = _data;
    492	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    493	enum nl80211_band band;
    494
    495	if (iwl_mvm_vif_low_latency(mvmvif)) {
    496		result->result = true;
    497
    498		if (!mvmvif->phy_ctxt)
    499			return;
    500
    501		band = mvmvif->phy_ctxt->channel->band;
    502		result->result_per_band[band] = true;
    503	}
    504}
    505
    506bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
    507{
    508	struct iwl_mvm_low_latency_iter data = {};
    509
    510	ieee80211_iterate_active_interfaces_atomic(
    511			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    512			iwl_mvm_ll_iter, &data);
    513
    514	return data.result;
    515}
    516
    517bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
    518{
    519	struct iwl_mvm_low_latency_iter data = {};
    520
    521	ieee80211_iterate_active_interfaces_atomic(
    522			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    523			iwl_mvm_ll_iter, &data);
    524
    525	return data.result_per_band[band];
    526}
    527
    528struct iwl_bss_iter_data {
    529	struct ieee80211_vif *vif;
    530	bool error;
    531};
    532
    533static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
    534				       struct ieee80211_vif *vif)
    535{
    536	struct iwl_bss_iter_data *data = _data;
    537
    538	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
    539		return;
    540
    541	if (data->vif) {
    542		data->error = true;
    543		return;
    544	}
    545
    546	data->vif = vif;
    547}
    548
    549struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
    550{
    551	struct iwl_bss_iter_data bss_iter_data = {};
    552
    553	ieee80211_iterate_active_interfaces_atomic(
    554		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    555		iwl_mvm_bss_iface_iterator, &bss_iter_data);
    556
    557	if (bss_iter_data.error) {
    558		IWL_ERR(mvm, "More than one managed interface active!\n");
    559		return ERR_PTR(-EINVAL);
    560	}
    561
    562	return bss_iter_data.vif;
    563}
    564
    565struct iwl_bss_find_iter_data {
    566	struct ieee80211_vif *vif;
    567	u32 macid;
    568};
    569
    570static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
    571					    struct ieee80211_vif *vif)
    572{
    573	struct iwl_bss_find_iter_data *data = _data;
    574	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    575
    576	if (mvmvif->id == data->macid)
    577		data->vif = vif;
    578}
    579
    580struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
    581{
    582	struct iwl_bss_find_iter_data data = {
    583		.macid = macid,
    584	};
    585
    586	lockdep_assert_held(&mvm->mutex);
    587
    588	ieee80211_iterate_active_interfaces_atomic(
    589		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    590		iwl_mvm_bss_find_iface_iterator, &data);
    591
    592	return data.vif;
    593}
    594
    595struct iwl_sta_iter_data {
    596	bool assoc;
    597};
    598
    599static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
    600				       struct ieee80211_vif *vif)
    601{
    602	struct iwl_sta_iter_data *data = _data;
    603
    604	if (vif->type != NL80211_IFTYPE_STATION)
    605		return;
    606
    607	if (vif->bss_conf.assoc)
    608		data->assoc = true;
    609}
    610
    611bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
    612{
    613	struct iwl_sta_iter_data data = {
    614		.assoc = false,
    615	};
    616
    617	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
    618						   IEEE80211_IFACE_ITER_NORMAL,
    619						   iwl_mvm_sta_iface_iterator,
    620						   &data);
    621	return data.assoc;
    622}
    623
    624unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
    625				    struct ieee80211_vif *vif,
    626				    bool tdls, bool cmd_q)
    627{
    628	struct iwl_fw_dbg_trigger_tlv *trigger;
    629	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
    630	unsigned int default_timeout = cmd_q ?
    631		IWL_DEF_WD_TIMEOUT :
    632		mvm->trans->trans_cfg->base_params->wd_timeout;
    633
    634	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
    635		/*
    636		 * We can't know when the station is asleep or awake, so we
    637		 * must disable the queue hang detection.
    638		 */
    639		if (fw_has_capa(&mvm->fw->ucode_capa,
    640				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
    641		    vif && vif->type == NL80211_IFTYPE_AP)
    642			return IWL_WATCHDOG_DISABLED;
    643		return default_timeout;
    644	}
    645
    646	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
    647	txq_timer = (void *)trigger->data;
    648
    649	if (tdls)
    650		return le32_to_cpu(txq_timer->tdls);
    651
    652	if (cmd_q)
    653		return le32_to_cpu(txq_timer->command_queue);
    654
    655	if (WARN_ON(!vif))
    656		return default_timeout;
    657
    658	switch (ieee80211_vif_type_p2p(vif)) {
    659	case NL80211_IFTYPE_ADHOC:
    660		return le32_to_cpu(txq_timer->ibss);
    661	case NL80211_IFTYPE_STATION:
    662		return le32_to_cpu(txq_timer->bss);
    663	case NL80211_IFTYPE_AP:
    664		return le32_to_cpu(txq_timer->softap);
    665	case NL80211_IFTYPE_P2P_CLIENT:
    666		return le32_to_cpu(txq_timer->p2p_client);
    667	case NL80211_IFTYPE_P2P_GO:
    668		return le32_to_cpu(txq_timer->p2p_go);
    669	case NL80211_IFTYPE_P2P_DEVICE:
    670		return le32_to_cpu(txq_timer->p2p_device);
    671	case NL80211_IFTYPE_MONITOR:
    672		return default_timeout;
    673	default:
    674		WARN_ON(1);
    675		return mvm->trans->trans_cfg->base_params->wd_timeout;
    676	}
    677}
    678
    679void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
    680			     const char *errmsg)
    681{
    682	struct iwl_fw_dbg_trigger_tlv *trig;
    683	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
    684
    685	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
    686				     FW_DBG_TRIGGER_MLME);
    687	if (!trig)
    688		goto out;
    689
    690	trig_mlme = (void *)trig->data;
    691
    692	if (trig_mlme->stop_connection_loss &&
    693	    --trig_mlme->stop_connection_loss)
    694		goto out;
    695
    696	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
    697
    698out:
    699	ieee80211_connection_loss(vif);
    700}
    701
    702void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
    703					  struct ieee80211_vif *vif,
    704					  const struct ieee80211_sta *sta,
    705					  u16 tid)
    706{
    707	struct iwl_fw_dbg_trigger_tlv *trig;
    708	struct iwl_fw_dbg_trigger_ba *ba_trig;
    709
    710	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
    711				     FW_DBG_TRIGGER_BA);
    712	if (!trig)
    713		return;
    714
    715	ba_trig = (void *)trig->data;
    716
    717	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
    718		return;
    719
    720	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
    721				"Frame from %pM timed out, tid %d",
    722				sta->addr, tid);
    723}
    724
    725u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
    726{
    727	if (!elapsed)
    728		return 0;
    729
    730	return (100 * airtime / elapsed) / USEC_PER_MSEC;
    731}
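
/*
 * Worked example: airtime is in usec, elapsed in msec, so
 * 100 * airtime / elapsed is the percentage still scaled by
 * usec-per-msec, and the final division by USEC_PER_MSEC (1000)
 * normalizes it. E.g. airtime = 250000 usec over elapsed = 1000 msec:
 * (100 * 250000 / 1000) / 1000 = 25, i.e. a 25% load.
 */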
    732
    733static enum iwl_mvm_traffic_load
    734iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
    735{
    736	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
    737
    738	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
    739		return IWL_MVM_TRAFFIC_HIGH;
    740	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
    741		return IWL_MVM_TRAFFIC_MEDIUM;
    742
    743	return IWL_MVM_TRAFFIC_LOW;
    744}
    745
    746static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
    747{
    748	struct iwl_mvm *mvm = _data;
    749	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    750	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
    751
    752	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
    753		return;
    754
    755	low_latency = mvm->tcm.result.low_latency[mvmvif->id];
    756
    757	if (!mvm->tcm.result.change[mvmvif->id] &&
    758	    prev == low_latency) {
    759		iwl_mvm_update_quotas(mvm, false, NULL);
    760		return;
    761	}
    762
    763	if (prev != low_latency) {
    764		/* this sends traffic load and updates quota as well */
    765		iwl_mvm_update_low_latency(mvm, vif, low_latency,
    766					   LOW_LATENCY_TRAFFIC);
    767	} else {
    768		iwl_mvm_update_quotas(mvm, false, NULL);
    769	}
    770}
    771
    772static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
    773{
    774	mutex_lock(&mvm->mutex);
    775
    776	ieee80211_iterate_active_interfaces(
    777		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
    778		iwl_mvm_tcm_iter, mvm);
    779
    780	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
    781		iwl_mvm_config_scan(mvm);
    782
    783	mutex_unlock(&mvm->mutex);
    784}
    785
    786static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
    787{
    788	struct iwl_mvm *mvm;
    789	struct iwl_mvm_vif *mvmvif;
    790	struct ieee80211_vif *vif;
    791
    792	mvmvif = container_of(wk, struct iwl_mvm_vif,
    793			      uapsd_nonagg_detected_wk.work);
    794	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
    795	mvm = mvmvif->mvm;
    796
    797	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
    798		return;
    799
    800	/* remember that this AP is broken */
    801	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
    802	       vif->bss_conf.bssid, ETH_ALEN);
    803	mvm->uapsd_noagg_bssid_write_idx++;
    804	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
    805		mvm->uapsd_noagg_bssid_write_idx = 0;
    806
    807	iwl_mvm_connection_loss(mvm, vif,
    808				"AP isn't using AMPDU with uAPSD enabled");
    809}
    810
    811static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
    812					 struct ieee80211_vif *vif)
    813{
    814	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    815
    816	if (vif->type != NL80211_IFTYPE_STATION)
    817		return;
    818
    819	if (!vif->bss_conf.assoc)
    820		return;
    821
    822	if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
    823	    !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
    824	    !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
    825	    !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
    826		return;
    827
    828	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
    829		return;
    830
    831	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
    832	IWL_INFO(mvm,
    833		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
    834	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
    835}
    836
    837static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
    838						 unsigned int elapsed,
    839						 int mac)
    840{
    841	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
    842	u64 tpt;
    843	unsigned long rate;
    844	struct ieee80211_vif *vif;
    845
    846	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
    847
    848	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
    849	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
    850		return;
    851
    852	if (iwl_mvm_has_new_rx_api(mvm)) {
    853		tpt = 8 * bytes; /* kbps */
    854		do_div(tpt, elapsed);
    855		rate *= 1000; /* kbps */
    856		if (tpt < 22 * rate / 100)
    857			return;
    858	} else {
    859		/*
    860		 * the rate here is actually the threshold, in 100Kbps units,
    861		 * so do the needed conversion from bytes to 100Kbps:
    862		 * 100kb = bits / (100 * 1000),
    863		 * 100kbps = 100kb / (msecs / 1000) ==
    864		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
    865		 *           bits / (100 * msecs)
    866		 */
    867		tpt = (8 * bytes);
    868		do_div(tpt, elapsed * 100);
    869		if (tpt < rate)
    870			return;
    871	}
    872
    873	rcu_read_lock();
    874	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
    875	if (vif)
    876		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
    877	rcu_read_unlock();
    878}
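
/*
 * Worked example for the legacy branch above: with bytes = 1250000
 * received over elapsed = 1000 msec,
 * tpt = 8 * 1250000 / (1000 * 100) = 100 units of 100 kbps, i.e.
 * 10 Mbps. Only when that reaches the EWMA-derived threshold does the
 * code go on to mark the AP and schedule the disconnect work.
 */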
    879
    880static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
    881				 struct ieee80211_vif *vif)
    882{
    883	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    884	u32 *band = _data;
    885
    886	if (!mvmvif->phy_ctxt)
    887		return;
    888
    889	band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
    890}
    891
    892static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
    893					    unsigned long ts,
    894					    bool handle_uapsd)
    895{
    896	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
    897	unsigned int uapsd_elapsed =
    898		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
    899	u32 total_airtime = 0;
    900	u32 band_airtime[NUM_NL80211_BANDS] = {0};
    901	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
    902	int ac, mac, i;
    903	bool low_latency = false;
    904	enum iwl_mvm_traffic_load load, band_load;
    905	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
    906
    907	if (handle_ll)
    908		mvm->tcm.ll_ts = ts;
    909	if (handle_uapsd)
    910		mvm->tcm.uapsd_nonagg_ts = ts;
    911
    912	mvm->tcm.result.elapsed = elapsed;
    913
    914	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
    915						   IEEE80211_IFACE_ITER_NORMAL,
    916						   iwl_mvm_tcm_iterator,
    917						   &band);
    918
    919	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
    920		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
    921		u32 vo_vi_pkts = 0;
    922		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
    923
    924		total_airtime += airtime;
    925		band_airtime[band[mac]] += airtime;
    926
    927		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
    928		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
    929		mvm->tcm.result.load[mac] = load;
    930		mvm->tcm.result.airtime[mac] = airtime;
    931
    932		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
    933			vo_vi_pkts += mdata->rx.pkts[ac] +
    934				      mdata->tx.pkts[ac];
    935
    936		/* enable immediately with enough packets but defer disabling */
    937		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
    938			mvm->tcm.result.low_latency[mac] = true;
    939		else if (handle_ll)
    940			mvm->tcm.result.low_latency[mac] = false;
    941
    942		if (handle_ll) {
    943			/* clear old data */
    944			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
    945			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
    946		}
    947		low_latency |= mvm->tcm.result.low_latency[mac];
    948
    949		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
    950			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
    951							     mac);
    952		/* clear old data */
    953		if (handle_uapsd)
    954			mdata->uapsd_nonagg_detect.rx_bytes = 0;
    955		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
    956		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
    957	}
    958
    959	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
    960	mvm->tcm.result.global_load = load;
    961
    962	for (i = 0; i < NUM_NL80211_BANDS; i++) {
    963		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
    964		mvm->tcm.result.band_load[i] = band_load;
    965	}
    966
    967	/*
    968	 * If the current load isn't low we need to force re-evaluation
    969	 * in the TCM period, so that we can return to low load if there
    970	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
    971	 * triggered by traffic).
    972	 */
    973	if (load != IWL_MVM_TRAFFIC_LOW)
    974		return MVM_TCM_PERIOD;
    975	/*
    976	 * If low-latency is active we need to force re-evaluation after
    977	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
    978	 * when there's no traffic at all.
    979	 */
    980	if (low_latency)
    981		return MVM_LL_PERIOD;
    982	/*
    983	 * Otherwise, we don't need to run the work struct because we're
    984	 * in the default "idle" state - traffic indication is low (which
    985	 * also covers the "no traffic" case) and low-latency is disabled
    986	 * so there's no state that may need to be disabled when there's
    987	 * no traffic at all.
    988	 *
    989	 * Note that this has no impact on the regular scheduling of the
    990	 * updates triggered by traffic - those happen whenever one of the
    991	 * two timeouts expire (if there's traffic at all.)
    992	 */
    993	return 0;
    994}
    995
    996void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
    997{
    998	unsigned long ts = jiffies;
    999	bool handle_uapsd =
   1000		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
   1001			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
   1002
   1003	spin_lock(&mvm->tcm.lock);
   1004	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
   1005		spin_unlock(&mvm->tcm.lock);
   1006		return;
   1007	}
   1008	spin_unlock(&mvm->tcm.lock);
   1009
   1010	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
   1011		mutex_lock(&mvm->mutex);
   1012		if (iwl_mvm_request_statistics(mvm, true))
   1013			handle_uapsd = false;
   1014		mutex_unlock(&mvm->mutex);
   1015	}
   1016
   1017	spin_lock(&mvm->tcm.lock);
   1018	/* re-check if somebody else won the recheck race */
   1019	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
   1020		/* calculate statistics */
   1021		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
   1022								  handle_uapsd);
   1023
   1024		/* the memset needs to be visible before the timestamp */
   1025		smp_mb();
   1026		mvm->tcm.ts = ts;
   1027		if (work_delay)
   1028			schedule_delayed_work(&mvm->tcm.work, work_delay);
   1029	}
   1030	spin_unlock(&mvm->tcm.lock);
   1031
   1032	iwl_mvm_tcm_results(mvm);
   1033}
   1034
   1035void iwl_mvm_tcm_work(struct work_struct *work)
   1036{
   1037	struct delayed_work *delayed_work = to_delayed_work(work);
   1038	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
   1039					   tcm.work);
   1040
   1041	iwl_mvm_recalc_tcm(mvm);
   1042}
   1043
   1044void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
   1045{
   1046	spin_lock_bh(&mvm->tcm.lock);
   1047	mvm->tcm.paused = true;
   1048	spin_unlock_bh(&mvm->tcm.lock);
   1049	if (with_cancel)
   1050		cancel_delayed_work_sync(&mvm->tcm.work);
   1051}
   1052
   1053void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
   1054{
   1055	int mac;
   1056	bool low_latency = false;
   1057
   1058	spin_lock_bh(&mvm->tcm.lock);
   1059	mvm->tcm.ts = jiffies;
   1060	mvm->tcm.ll_ts = jiffies;
   1061	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
   1062		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
   1063
   1064		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
   1065		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
   1066		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
   1067		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
   1068
   1069		if (mvm->tcm.result.low_latency[mac])
   1070			low_latency = true;
   1071	}
   1072	/* The TCM data needs to be reset before "paused" flag changes */
   1073	smp_mb();
   1074	mvm->tcm.paused = false;
   1075
   1076	/*
   1077	 * if the current load is not low or low latency is active, force
   1078	 * re-evaluation to cover the case of no traffic.
   1079	 */
   1080	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
   1081		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
   1082	else if (low_latency)
   1083		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
   1084
   1085	spin_unlock_bh(&mvm->tcm.lock);
   1086}
   1087
   1088void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
   1089{
   1090	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
   1091
   1092	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
   1093			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
   1094}
   1095
   1096void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
   1097{
   1098	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
   1099
   1100	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
   1101}
   1102
   1103u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
   1104{
   1105	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
   1106
   1107	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
   1108	    mvm->trans->cfg->gp2_reg_addr)
   1109		reg_addr = mvm->trans->cfg->gp2_reg_addr;
   1110
   1111	return iwl_read_prph(mvm->trans, reg_addr);
   1112}
   1113
   1114void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
   1115			   u32 *gp2, u64 *boottime, ktime_t *realtime)
   1116{
   1117	bool ps_disabled;
   1118
   1119	lockdep_assert_held(&mvm->mutex);
   1120
   1121	/* Disable power save when reading GP2 */
   1122	ps_disabled = mvm->ps_disabled;
   1123	if (!ps_disabled) {
   1124		mvm->ps_disabled = true;
   1125		iwl_mvm_power_update_device(mvm);
   1126	}
   1127
   1128	*gp2 = iwl_mvm_get_systime(mvm);
   1129
   1130	if (clock_type == CLOCK_BOOTTIME && boottime)
   1131		*boottime = ktime_get_boottime_ns();
   1132	else if (clock_type == CLOCK_REALTIME && realtime)
   1133		*realtime = ktime_get_real();
   1134
   1135	if (!ps_disabled) {
   1136		mvm->ps_disabled = ps_disabled;
   1137		iwl_mvm_power_update_device(mvm);
   1138	}
   1139}
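
/*
 * Illustrative sketch (hypothetical helper): reading a synchronized
 * (GP2, boottime) timestamp pair. The function above transparently
 * disables power save around the GP2 register read, so the caller only
 * needs to hold mvm->mutex.
 */
static void example_read_sync_time(struct iwl_mvm *mvm)
{
	u32 gp2;
	u64 boottime;

	lockdep_assert_held(&mvm->mutex);
	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime, NULL);
}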