cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rs.c (121048B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/******************************************************************************
      3 *
      4 * Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
      5 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
      6 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
      7 *****************************************************************************/
      8#include <linux/kernel.h>
      9#include <linux/skbuff.h>
     10#include <linux/slab.h>
     11#include <net/mac80211.h>
     12
     13#include <linux/netdevice.h>
     14#include <linux/etherdevice.h>
     15#include <linux/delay.h>
     16
     17#include <linux/workqueue.h>
     18#include "rs.h"
     19#include "fw-api.h"
     20#include "sta.h"
     21#include "iwl-op-mode.h"
     22#include "mvm.h"
     23#include "debugfs.h"
     24
     25#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
     26
     27/* Calculations of success ratio are done in fixed point where 12800 is 100%.
      28 * Use this macro when dealing with threshold constants set as a percentage
     29 */
     30#define RS_PERCENT(x) (128 * x)
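/*
 * For illustration: with 12800 representing 100%, RS_PERCENT(64) expands to
 * 128 * 64 = 8192, i.e. a 64% threshold on the same fixed-point scale. This
 * is how percentage constants such as IWL_MVM_RS_SR_NO_DECREASE are compared
 * against window->success_ratio further below.
 */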
     31
     32static u8 rs_ht_to_legacy[] = {
     33	[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
     34	[IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
     35	[IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
     36	[IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
     37	[IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
     38	[IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
     39	[IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
     40	[IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
     41	[IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
     42	[IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
     43};
     44
     45static const u8 ant_toggle_lookup[] = {
     46	[ANT_NONE] = ANT_NONE,
     47	[ANT_A] = ANT_B,
     48	[ANT_B] = ANT_A,
     49	[ANT_AB] = ANT_AB,
     50};
     51
     52#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)			      \
     53	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,	      \
     54				    IWL_RATE_HT_SISO_MCS_##s##_PLCP,  \
     55				    IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
     56				    IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
     57				    IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
     58				    IWL_RATE_##rp##M_INDEX,	      \
     59				    IWL_RATE_##rn##M_INDEX }
     60
     61#define IWL_DECLARE_MCS_RATE(s)						  \
     62	[IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP,		  \
     63				       IWL_RATE_HT_SISO_MCS_##s##_PLCP,	  \
     64				       IWL_RATE_HT_MIMO2_MCS_##s##_PLCP,  \
     65				       IWL_RATE_VHT_SISO_MCS_##s##_PLCP,  \
     66				       IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
     67				       IWL_RATE_INVM_INDEX,	          \
     68				       IWL_RATE_INVM_INDEX }
     69
     70/*
     71 * Parameter order:
     72 *   rate, ht rate, prev rate, next rate
     73 *
     74 * If there isn't a valid next or previous rate then INV is used which
     75 * maps to IWL_RATE_INVALID
     76 *
     77 */
     78static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
     79	IWL_DECLARE_RATE_INFO(1, INV, INV, 2),   /*  1mbps */
     80	IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
     81	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
     82	IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
     83	IWL_DECLARE_RATE_INFO(6, 0, 5, 11),      /*  6mbps ; MCS 0 */
     84	IWL_DECLARE_RATE_INFO(9, INV, 6, 11),    /*  9mbps */
     85	IWL_DECLARE_RATE_INFO(12, 1, 11, 18),    /* 12mbps ; MCS 1 */
     86	IWL_DECLARE_RATE_INFO(18, 2, 12, 24),    /* 18mbps ; MCS 2 */
     87	IWL_DECLARE_RATE_INFO(24, 3, 18, 36),    /* 24mbps ; MCS 3 */
     88	IWL_DECLARE_RATE_INFO(36, 4, 24, 48),    /* 36mbps ; MCS 4 */
     89	IWL_DECLARE_RATE_INFO(48, 5, 36, 54),    /* 48mbps ; MCS 5 */
     90	IWL_DECLARE_RATE_INFO(54, 6, 48, INV),   /* 54mbps ; MCS 6 */
     91	IWL_DECLARE_MCS_RATE(7),                 /* MCS 7 */
     92	IWL_DECLARE_MCS_RATE(8),                 /* MCS 8 */
     93	IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
     94};
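/*
 * For illustration, IWL_DECLARE_RATE_INFO(6, 0, 5, 11) above expands to
 *   [IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *                           IWL_RATE_HT_SISO_MCS_0_PLCP,
 *                           IWL_RATE_HT_MIMO2_MCS_0_PLCP,
 *                           IWL_RATE_VHT_SISO_MCS_0_PLCP,
 *                           IWL_RATE_VHT_MIMO2_MCS_0_PLCP,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX },
 * i.e. 6 Mbps / MCS 0 with 5.5 Mbps and 11 Mbps as its legacy neighbours.
 */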
     95
     96enum rs_action {
     97	RS_ACTION_STAY = 0,
     98	RS_ACTION_DOWNSCALE = -1,
     99	RS_ACTION_UPSCALE = 1,
    100};
    101
    102enum rs_column_mode {
    103	RS_INVALID = 0,
    104	RS_LEGACY,
    105	RS_SISO,
    106	RS_MIMO2,
    107};
    108
    109#define MAX_NEXT_COLUMNS 7
    110#define MAX_COLUMN_CHECKS 3
    111
    112struct rs_tx_column;
    113
    114typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
    115				     struct ieee80211_sta *sta,
    116				     struct rs_rate *rate,
    117				     const struct rs_tx_column *next_col);
    118
    119struct rs_tx_column {
    120	enum rs_column_mode mode;
    121	u8 ant;
    122	bool sgi;
    123	enum rs_column next_columns[MAX_NEXT_COLUMNS];
    124	allow_column_func_t checks[MAX_COLUMN_CHECKS];
    125};
    126
    127static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
    128			 struct rs_rate *rate,
    129			 const struct rs_tx_column *next_col)
    130{
    131	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
    132}
    133
    134static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
    135			  struct rs_rate *rate,
    136			  const struct rs_tx_column *next_col)
    137{
    138	if (!sta->deflink.ht_cap.ht_supported)
    139		return false;
    140
    141	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
    142		return false;
    143
    144	if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
    145		return false;
    146
    147	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
    148		return false;
    149
    150	if (mvm->nvm_data->sku_cap_mimo_disabled)
    151		return false;
    152
    153	return true;
    154}
    155
    156static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
    157			  struct rs_rate *rate,
    158			  const struct rs_tx_column *next_col)
    159{
    160	if (!sta->deflink.ht_cap.ht_supported)
    161		return false;
    162
    163	return true;
    164}
    165
    166static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
    167			 struct rs_rate *rate,
    168			 const struct rs_tx_column *next_col)
    169{
    170	struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
    171	struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
    172
    173	if (is_ht20(rate) && (ht_cap->cap &
    174			     IEEE80211_HT_CAP_SGI_20))
    175		return true;
    176	if (is_ht40(rate) && (ht_cap->cap &
    177			     IEEE80211_HT_CAP_SGI_40))
    178		return true;
    179	if (is_ht80(rate) && (vht_cap->cap &
    180			     IEEE80211_VHT_CAP_SHORT_GI_80))
    181		return true;
    182	if (is_ht160(rate) && (vht_cap->cap &
    183			     IEEE80211_VHT_CAP_SHORT_GI_160))
    184		return true;
    185
    186	return false;
    187}
    188
    189static const struct rs_tx_column rs_tx_columns[] = {
    190	[RS_COLUMN_LEGACY_ANT_A] = {
    191		.mode = RS_LEGACY,
    192		.ant = ANT_A,
    193		.next_columns = {
    194			RS_COLUMN_LEGACY_ANT_B,
    195			RS_COLUMN_SISO_ANT_A,
    196			RS_COLUMN_MIMO2,
    197			RS_COLUMN_INVALID,
    198			RS_COLUMN_INVALID,
    199			RS_COLUMN_INVALID,
    200			RS_COLUMN_INVALID,
    201		},
    202		.checks = {
    203			rs_ant_allow,
    204		},
    205	},
    206	[RS_COLUMN_LEGACY_ANT_B] = {
    207		.mode = RS_LEGACY,
    208		.ant = ANT_B,
    209		.next_columns = {
    210			RS_COLUMN_LEGACY_ANT_A,
    211			RS_COLUMN_SISO_ANT_B,
    212			RS_COLUMN_MIMO2,
    213			RS_COLUMN_INVALID,
    214			RS_COLUMN_INVALID,
    215			RS_COLUMN_INVALID,
    216			RS_COLUMN_INVALID,
    217		},
    218		.checks = {
    219			rs_ant_allow,
    220		},
    221	},
    222	[RS_COLUMN_SISO_ANT_A] = {
    223		.mode = RS_SISO,
    224		.ant = ANT_A,
    225		.next_columns = {
    226			RS_COLUMN_SISO_ANT_B,
    227			RS_COLUMN_MIMO2,
    228			RS_COLUMN_SISO_ANT_A_SGI,
    229			RS_COLUMN_LEGACY_ANT_A,
    230			RS_COLUMN_LEGACY_ANT_B,
    231			RS_COLUMN_INVALID,
    232			RS_COLUMN_INVALID,
    233		},
    234		.checks = {
    235			rs_siso_allow,
    236			rs_ant_allow,
    237		},
    238	},
    239	[RS_COLUMN_SISO_ANT_B] = {
    240		.mode = RS_SISO,
    241		.ant = ANT_B,
    242		.next_columns = {
    243			RS_COLUMN_SISO_ANT_A,
    244			RS_COLUMN_MIMO2,
    245			RS_COLUMN_SISO_ANT_B_SGI,
    246			RS_COLUMN_LEGACY_ANT_A,
    247			RS_COLUMN_LEGACY_ANT_B,
    248			RS_COLUMN_INVALID,
    249			RS_COLUMN_INVALID,
    250		},
    251		.checks = {
    252			rs_siso_allow,
    253			rs_ant_allow,
    254		},
    255	},
    256	[RS_COLUMN_SISO_ANT_A_SGI] = {
    257		.mode = RS_SISO,
    258		.ant = ANT_A,
    259		.sgi = true,
    260		.next_columns = {
    261			RS_COLUMN_SISO_ANT_B_SGI,
    262			RS_COLUMN_MIMO2_SGI,
    263			RS_COLUMN_SISO_ANT_A,
    264			RS_COLUMN_LEGACY_ANT_A,
    265			RS_COLUMN_LEGACY_ANT_B,
    266			RS_COLUMN_INVALID,
    267			RS_COLUMN_INVALID,
    268		},
    269		.checks = {
    270			rs_siso_allow,
    271			rs_ant_allow,
    272			rs_sgi_allow,
    273		},
    274	},
    275	[RS_COLUMN_SISO_ANT_B_SGI] = {
    276		.mode = RS_SISO,
    277		.ant = ANT_B,
    278		.sgi = true,
    279		.next_columns = {
    280			RS_COLUMN_SISO_ANT_A_SGI,
    281			RS_COLUMN_MIMO2_SGI,
    282			RS_COLUMN_SISO_ANT_B,
    283			RS_COLUMN_LEGACY_ANT_A,
    284			RS_COLUMN_LEGACY_ANT_B,
    285			RS_COLUMN_INVALID,
    286			RS_COLUMN_INVALID,
    287		},
    288		.checks = {
    289			rs_siso_allow,
    290			rs_ant_allow,
    291			rs_sgi_allow,
    292		},
    293	},
    294	[RS_COLUMN_MIMO2] = {
    295		.mode = RS_MIMO2,
    296		.ant = ANT_AB,
    297		.next_columns = {
    298			RS_COLUMN_SISO_ANT_A,
    299			RS_COLUMN_MIMO2_SGI,
    300			RS_COLUMN_LEGACY_ANT_A,
    301			RS_COLUMN_LEGACY_ANT_B,
    302			RS_COLUMN_INVALID,
    303			RS_COLUMN_INVALID,
    304			RS_COLUMN_INVALID,
    305		},
    306		.checks = {
    307			rs_mimo_allow,
    308		},
    309	},
    310	[RS_COLUMN_MIMO2_SGI] = {
    311		.mode = RS_MIMO2,
    312		.ant = ANT_AB,
    313		.sgi = true,
    314		.next_columns = {
    315			RS_COLUMN_SISO_ANT_A_SGI,
    316			RS_COLUMN_MIMO2,
    317			RS_COLUMN_LEGACY_ANT_A,
    318			RS_COLUMN_LEGACY_ANT_B,
    319			RS_COLUMN_INVALID,
    320			RS_COLUMN_INVALID,
    321			RS_COLUMN_INVALID,
    322		},
    323		.checks = {
    324			rs_mimo_allow,
    325			rs_sgi_allow,
    326		},
    327	},
    328};
    329
    330static inline u8 rs_extract_rate(u32 rate_n_flags)
    331{
    332	/* also works for HT because bits 7:6 are zero there */
    333	return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK_V1);
    334}
    335
    336static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
    337{
    338	int idx = 0;
    339
    340	if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
    341		idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1;
    342		idx += IWL_RATE_MCS_0_INDEX;
    343
     344		/* skip 9M, not supported in HT */
    345		if (idx >= IWL_RATE_9M_INDEX)
    346			idx += 1;
    347		if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
    348			return idx;
    349	} else if (rate_n_flags & RATE_MCS_VHT_MSK_V1 ||
    350		   rate_n_flags & RATE_MCS_HE_MSK_V1) {
    351		idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
    352		idx += IWL_RATE_MCS_0_INDEX;
    353
     354		/* skip 9M, not supported in VHT */
    355		if (idx >= IWL_RATE_9M_INDEX)
    356			idx++;
    357		if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
    358			return idx;
    359		if ((rate_n_flags & RATE_MCS_HE_MSK_V1) &&
    360		    idx <= IWL_LAST_HE_RATE)
    361			return idx;
    362	} else {
    363		/* legacy rate format, search for match in table */
    364
    365		u8 legacy_rate = rs_extract_rate(rate_n_flags);
    366		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
    367			if (iwl_rates[idx].plcp == legacy_rate)
    368				return idx;
    369	}
    370
    371	return IWL_RATE_INVALID;
    372}
    373
    374static void rs_rate_scale_perform(struct iwl_mvm *mvm,
    375				  struct ieee80211_sta *sta,
    376				  struct iwl_lq_sta *lq_sta,
    377				  int tid, bool ndp);
    378static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
    379			   struct ieee80211_sta *sta,
    380			   struct iwl_lq_sta *lq_sta,
    381			   const struct rs_rate *initial_rate);
    382static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
    383
    384/*
    385 * The following tables contain the expected throughput metrics for all rates
    386 *
    387 *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
    388 *
    389 * where invalid entries are zeros.
    390 *
    391 * CCK rates are only valid in legacy table and will only be used in G
    392 * (2.4 GHz) band.
    393 */
    394static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
    395	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
    396};
    397
    398/* Expected TpT tables. 4 indexes:
    399 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
    400 */
    401static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
    402	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
    403	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
    404	{0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
    405	{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
    406};
    407
    408static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
    409	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
    410	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
    411	{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
    412	{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
    413};
    414
    415static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
    416	{0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
    417	{0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
    418	{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
    419	{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
    420};
    421
    422static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = {
    423	{0, 0, 0, 0, 191, 0, 244, 288,  298,  308,  313,  318,  323,  328,  330},
    424	{0, 0, 0, 0, 200, 0, 251, 293,  302,  312,  317,  322,  327,  332,  334},
    425	{0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581},
    426	{0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165},
    427};
    428
    429static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
    430	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
    431	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
    432	{0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
    433	{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
    434};
    435
    436static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
    437	{0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
    438	{0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
    439	{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
    440	{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
    441};
    442
    443static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
    444	{0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
    445	{0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
    446	{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
    447	{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
    448};
    449
    450static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
    451	{0, 0, 0, 0, 240, 0, 278,  308,  313,  319,  322,  324,  328,  330,   334},
    452	{0, 0, 0, 0, 247, 0, 282,  310,  315,  320,  323,  325,  329,  332,   338},
    453	{0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629,  10592},
    454	{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
    455};
    456
    457static const char *rs_pretty_lq_type(enum iwl_table_type type)
    458{
    459	static const char * const lq_types[] = {
    460		[LQ_NONE] = "NONE",
    461		[LQ_LEGACY_A] = "LEGACY_A",
    462		[LQ_LEGACY_G] = "LEGACY_G",
    463		[LQ_HT_SISO] = "HT SISO",
    464		[LQ_HT_MIMO2] = "HT MIMO",
    465		[LQ_VHT_SISO] = "VHT SISO",
    466		[LQ_VHT_MIMO2] = "VHT MIMO",
    467		[LQ_HE_SISO] = "HE SISO",
    468		[LQ_HE_MIMO2] = "HE MIMO",
    469	};
    470
    471	if (type < LQ_NONE || type >= LQ_MAX)
    472		return "UNKNOWN";
    473
    474	return lq_types[type];
    475}
    476
    477static char *rs_pretty_rate(const struct rs_rate *rate)
    478{
    479	static char buf[40];
    480	static const char * const legacy_rates[] = {
    481		[IWL_RATE_1M_INDEX] = "1M",
    482		[IWL_RATE_2M_INDEX] = "2M",
    483		[IWL_RATE_5M_INDEX] = "5.5M",
    484		[IWL_RATE_11M_INDEX] = "11M",
    485		[IWL_RATE_6M_INDEX] = "6M",
    486		[IWL_RATE_9M_INDEX] = "9M",
    487		[IWL_RATE_12M_INDEX] = "12M",
    488		[IWL_RATE_18M_INDEX] = "18M",
    489		[IWL_RATE_24M_INDEX] = "24M",
    490		[IWL_RATE_36M_INDEX] = "36M",
    491		[IWL_RATE_48M_INDEX] = "48M",
    492		[IWL_RATE_54M_INDEX] = "54M",
    493	};
    494	static const char *const ht_vht_rates[] = {
    495		[IWL_RATE_MCS_0_INDEX] = "MCS0",
    496		[IWL_RATE_MCS_1_INDEX] = "MCS1",
    497		[IWL_RATE_MCS_2_INDEX] = "MCS2",
    498		[IWL_RATE_MCS_3_INDEX] = "MCS3",
    499		[IWL_RATE_MCS_4_INDEX] = "MCS4",
    500		[IWL_RATE_MCS_5_INDEX] = "MCS5",
    501		[IWL_RATE_MCS_6_INDEX] = "MCS6",
    502		[IWL_RATE_MCS_7_INDEX] = "MCS7",
    503		[IWL_RATE_MCS_8_INDEX] = "MCS8",
    504		[IWL_RATE_MCS_9_INDEX] = "MCS9",
    505	};
    506	const char *rate_str;
    507
    508	if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
    509		rate_str = legacy_rates[rate->index];
    510	else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
    511		 (rate->index >= IWL_RATE_MCS_0_INDEX) &&
    512		 (rate->index <= IWL_RATE_MCS_9_INDEX))
    513		rate_str = ht_vht_rates[rate->index];
    514	else
    515		rate_str = "BAD_RATE";
    516
    517	sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
    518		iwl_rs_pretty_ant(rate->ant), rate_str);
    519	return buf;
    520}
    521
    522static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
    523				const char *prefix)
    524{
    525	IWL_DEBUG_RATE(mvm,
    526		       "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
    527		       prefix, rs_pretty_rate(rate), rate->bw,
    528		       rate->sgi, rate->ldpc, rate->stbc);
    529}
    530
    531static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
    532{
    533	window->data = 0;
    534	window->success_counter = 0;
    535	window->success_ratio = IWL_INVALID_VALUE;
    536	window->counter = 0;
    537	window->average_tpt = IWL_INVALID_VALUE;
    538}
    539
    540static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
    541					    struct iwl_scale_tbl_info *tbl)
    542{
    543	int i;
    544
    545	IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
    546	for (i = 0; i < IWL_RATE_COUNT; i++)
    547		rs_rate_scale_clear_window(&tbl->win[i]);
    548
    549	for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
    550		rs_rate_scale_clear_window(&tbl->tpc_win[i]);
    551}
    552
    553static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
    554{
    555	return (ant_type & valid_antenna) == ant_type;
    556}
    557
    558static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
    559				     struct iwl_lq_sta *lq_data, u8 tid,
    560				     struct ieee80211_sta *sta)
    561{
    562	int ret;
    563
    564	IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
    565		     sta->addr, tid);
    566
    567	/* start BA session until the peer sends del BA */
    568	ret = ieee80211_start_tx_ba_session(sta, tid, 0);
    569	if (ret == -EAGAIN) {
    570		/*
     571		 * driver and mac80211 are out of sync
     572		 * this might be caused by reloading the firmware
    573		 * stop the tx ba session here
    574		 */
    575		IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
    576			tid);
    577		ieee80211_stop_tx_ba_session(sta, tid);
    578	}
    579	return ret;
    580}
    581
    582static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
    583			      u8 tid, struct iwl_lq_sta *lq_sta,
    584			      struct ieee80211_sta *sta)
    585{
    586	struct iwl_mvm_tid_data *tid_data;
    587
    588	/*
    589	 * In AP mode, tid can be equal to IWL_MAX_TID_COUNT
    590	 * when the frame is not QoS
    591	 */
    592	if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) {
    593		IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
    594			tid, IWL_MAX_TID_COUNT);
    595		return;
    596	} else if (tid == IWL_MAX_TID_COUNT) {
    597		return;
    598	}
    599
    600	tid_data = &mvmsta->tid_data[tid];
    601	if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED &&
    602	    tid_data->state == IWL_AGG_OFF &&
    603	    (lq_sta->tx_agg_tid_en & BIT(tid)) &&
    604	    tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) {
    605		IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
    606		if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
    607			tid_data->state = IWL_AGG_QUEUED;
    608	}
    609}
    610
    611static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
    612{
    613	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
    614	       !!(rate_n_flags & RATE_MCS_ANT_B_MSK);
    615}
    616
    617/*
     618 * Static helper that returns the expected throughput from an
     619 * iwl_scale_tbl_info, wrapping a NULL pointer check
    620 */
    621static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
    622{
    623	if (tbl->expected_tpt)
    624		return tbl->expected_tpt[rs_index];
    625	return 0;
    626}
    627
    628/*
    629 * rs_collect_tx_data - Update the success/failure sliding window
    630 *
    631 * We keep a sliding window of the last 62 packets transmitted
    632 * at this rate.  window->data contains the bitmask of successful
    633 * packets.
    634 */
    635static int _rs_collect_tx_data(struct iwl_mvm *mvm,
    636			       struct iwl_scale_tbl_info *tbl,
    637			       int scale_index, int attempts, int successes,
    638			       struct iwl_rate_scale_data *window)
    639{
    640	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
    641	s32 fail_count, tpt;
    642
    643	/* Get expected throughput */
    644	tpt = get_expected_tpt(tbl, scale_index);
    645
    646	/*
    647	 * Keep track of only the latest 62 tx frame attempts in this rate's
    648	 * history window; anything older isn't really relevant any more.
    649	 * If we have filled up the sliding window, drop the oldest attempt;
    650	 * if the oldest attempt (highest bit in bitmap) shows "success",
    651	 * subtract "1" from the success counter (this is the main reason
    652	 * we keep these bitmaps!).
    653	 */
    654	while (attempts > 0) {
    655		if (window->counter >= IWL_RATE_MAX_WINDOW) {
    656			/* remove earliest */
    657			window->counter = IWL_RATE_MAX_WINDOW - 1;
    658
    659			if (window->data & mask) {
    660				window->data &= ~mask;
    661				window->success_counter--;
    662			}
    663		}
    664
    665		/* Increment frames-attempted counter */
    666		window->counter++;
    667
    668		/* Shift bitmap by one frame to throw away oldest history */
    669		window->data <<= 1;
    670
    671		/* Mark the most recent #successes attempts as successful */
    672		if (successes > 0) {
    673			window->success_counter++;
    674			window->data |= 0x1;
    675			successes--;
    676		}
    677
    678		attempts--;
    679	}
    680
    681	/* Calculate current success ratio, avoid divide-by-0! */
    682	if (window->counter > 0)
    683		window->success_ratio = 128 * (100 * window->success_counter)
    684					/ window->counter;
    685	else
    686		window->success_ratio = IWL_INVALID_VALUE;
    687
    688	fail_count = window->counter - window->success_counter;
    689
    690	/* Calculate average throughput, if we have enough history. */
    691	if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
    692	    (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
    693		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
    694	else
    695		window->average_tpt = IWL_INVALID_VALUE;
    696
    697	return 0;
    698}
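/*
 * Worked example of the arithmetic above: with a full window of 62 attempts
 * and 31 successes, success_ratio = 128 * (100 * 31) / 62 = 6400, i.e. 50%
 * on the 12800-is-100% scale. With an expected throughput of 100 this gives
 * average_tpt = (6400 * 100 + 64) / 128 = 5000, i.e. 100 times the expected
 * throughput scaled by the success fraction, matching how target throughputs
 * are compared as 100 * tpt elsewhere in this file.
 */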
    699
    700static int rs_collect_tpc_data(struct iwl_mvm *mvm,
    701			       struct iwl_lq_sta *lq_sta,
    702			       struct iwl_scale_tbl_info *tbl,
    703			       int scale_index, int attempts, int successes,
    704			       u8 reduced_txp)
    705{
    706	struct iwl_rate_scale_data *window = NULL;
    707
    708	if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
    709		return -EINVAL;
    710
    711	window = &tbl->tpc_win[reduced_txp];
    712	return  _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
    713				    window);
    714}
    715
    716static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm,
    717				    struct iwl_mvm_sta *mvmsta,
    718				    u8 tid, int successes)
    719{
    720	struct iwl_mvm_tid_data *tid_data;
    721
    722	if (tid >= IWL_MAX_TID_COUNT)
    723		return;
    724
    725	tid_data = &mvmsta->tid_data[tid];
    726
    727	/*
     728	 * Measure whether there are enough successful transmits per second.
    729	 * These statistics are used only to decide if we can start a
    730	 * BA session, so it should be updated only when A-MPDU is
    731	 * off.
    732	 */
    733	if (tid_data->state != IWL_AGG_OFF)
    734		return;
    735
    736	if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) ||
    737	    (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
    738		tid_data->tx_count_last = tid_data->tx_count;
    739		tid_data->tx_count = 0;
    740		tid_data->tpt_meas_start = jiffies;
    741	} else {
    742		tid_data->tx_count += successes;
    743	}
    744}
    745
    746static int rs_collect_tlc_data(struct iwl_mvm *mvm,
    747			       struct iwl_mvm_sta *mvmsta, u8 tid,
    748			       struct iwl_scale_tbl_info *tbl,
    749			       int scale_index, int attempts, int successes)
    750{
    751	struct iwl_rate_scale_data *window = NULL;
    752
    753	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
    754		return -EINVAL;
    755
    756	if (tbl->column != RS_COLUMN_INVALID) {
    757		struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
    758
    759		pers->tx_stats[tbl->column][scale_index].total += attempts;
    760		pers->tx_stats[tbl->column][scale_index].success += successes;
    761	}
    762
    763	rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes);
    764
    765	/* Select window for current tx bit rate */
    766	window = &(tbl->win[scale_index]);
    767	return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
    768				   window);
    769}
    770
    771/* Convert rs_rate object into ucode rate bitmask */
    772static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
    773				  struct rs_rate *rate)
    774{
    775	u32 ucode_rate = 0;
    776	int index = rate->index;
    777
    778	ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
    779			 RATE_MCS_ANT_AB_MSK);
    780
    781	if (is_legacy(rate)) {
    782		ucode_rate |= iwl_rates[index].plcp;
    783		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
    784			ucode_rate |= RATE_MCS_CCK_MSK_V1;
    785		return ucode_rate;
    786	}
    787
     788	/* set RTS protection for all non-legacy rates
     789	 * This helps in congested environments by reducing the conflict cost to
    790	 * RTS retries only, instead of the entire BA packet.
    791	 */
    792	ucode_rate |= RATE_MCS_RTS_REQUIRED_MSK;
    793
    794	if (is_ht(rate)) {
    795		if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
    796			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
    797			index = IWL_LAST_HT_RATE;
    798		}
    799		ucode_rate |= RATE_MCS_HT_MSK_V1;
    800
    801		if (is_ht_siso(rate))
    802			ucode_rate |= iwl_rates[index].plcp_ht_siso;
    803		else if (is_ht_mimo2(rate))
    804			ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
    805		else
    806			WARN_ON_ONCE(1);
    807	} else if (is_vht(rate)) {
    808		if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
    809			IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
    810			index = IWL_LAST_VHT_RATE;
    811		}
    812		ucode_rate |= RATE_MCS_VHT_MSK_V1;
    813		if (is_vht_siso(rate))
    814			ucode_rate |= iwl_rates[index].plcp_vht_siso;
    815		else if (is_vht_mimo2(rate))
    816			ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
    817		else
    818			WARN_ON_ONCE(1);
    819
    820	} else {
    821		IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
    822	}
    823
    824	if (is_siso(rate) && rate->stbc) {
    825		/* To enable STBC we need to set both a flag and ANT_AB */
    826		ucode_rate |= RATE_MCS_ANT_AB_MSK;
    827		ucode_rate |= RATE_MCS_STBC_MSK;
    828	}
    829
    830	ucode_rate |= rate->bw;
    831	if (rate->sgi)
    832		ucode_rate |= RATE_MCS_SGI_MSK_V1;
    833	if (rate->ldpc)
    834		ucode_rate |= RATE_MCS_LDPC_MSK_V1;
    835
    836	return ucode_rate;
    837}
    838
    839/* Convert a ucode rate into an rs_rate object */
    840static int rs_rate_from_ucode_rate(const u32 ucode_rate,
    841				   enum nl80211_band band,
    842				   struct rs_rate *rate)
    843{
    844	u32 ant_msk = ucode_rate & RATE_MCS_ANT_AB_MSK;
    845	u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
    846	u8 nss;
    847
    848	memset(rate, 0, sizeof(*rate));
    849	rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
    850
    851	if (rate->index == IWL_RATE_INVALID)
    852		return -EINVAL;
    853
    854	rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
    855
    856	/* Legacy */
    857	if (!(ucode_rate & RATE_MCS_HT_MSK_V1) &&
    858	    !(ucode_rate & RATE_MCS_VHT_MSK_V1) &&
    859	    !(ucode_rate & RATE_MCS_HE_MSK_V1)) {
    860		if (num_of_ant == 1) {
    861			if (band == NL80211_BAND_5GHZ)
    862				rate->type = LQ_LEGACY_A;
    863			else
    864				rate->type = LQ_LEGACY_G;
    865		}
    866
    867		return 0;
    868	}
    869
    870	/* HT, VHT or HE */
    871	if (ucode_rate & RATE_MCS_SGI_MSK_V1)
    872		rate->sgi = true;
    873	if (ucode_rate & RATE_MCS_LDPC_MSK_V1)
    874		rate->ldpc = true;
    875	if (ucode_rate & RATE_MCS_STBC_MSK)
    876		rate->stbc = true;
    877	if (ucode_rate & RATE_MCS_BF_MSK)
    878		rate->bfer = true;
    879
    880	rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK_V1;
    881
    882	if (ucode_rate & RATE_MCS_HT_MSK_V1) {
    883		nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK_V1) >>
    884		       RATE_HT_MCS_NSS_POS_V1) + 1;
    885
    886		if (nss == 1) {
    887			rate->type = LQ_HT_SISO;
    888			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
    889				  "stbc %d bfer %d",
    890				  rate->stbc, rate->bfer);
    891		} else if (nss == 2) {
    892			rate->type = LQ_HT_MIMO2;
    893			WARN_ON_ONCE(num_of_ant != 2);
    894		} else {
    895			WARN_ON_ONCE(1);
    896		}
    897	} else if (ucode_rate & RATE_MCS_VHT_MSK_V1) {
    898		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
    899		       RATE_VHT_MCS_NSS_POS) + 1;
    900
    901		if (nss == 1) {
    902			rate->type = LQ_VHT_SISO;
    903			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
    904				  "stbc %d bfer %d",
    905				  rate->stbc, rate->bfer);
    906		} else if (nss == 2) {
    907			rate->type = LQ_VHT_MIMO2;
    908			WARN_ON_ONCE(num_of_ant != 2);
    909		} else {
    910			WARN_ON_ONCE(1);
    911		}
    912	} else if (ucode_rate & RATE_MCS_HE_MSK_V1) {
    913		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
    914		      RATE_VHT_MCS_NSS_POS) + 1;
    915
    916		if (nss == 1) {
    917			rate->type = LQ_HE_SISO;
    918			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
    919				  "stbc %d bfer %d", rate->stbc, rate->bfer);
    920		} else if (nss == 2) {
    921			rate->type = LQ_HE_MIMO2;
    922			WARN_ON_ONCE(num_of_ant != 2);
    923		} else {
    924			WARN_ON_ONCE(1);
    925		}
    926	}
    927
    928	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
    929		     !is_he(rate) && !is_vht(rate));
    930
    931	return 0;
    932}
    933
    934/* switch to another antenna/antennas and return 1 */
    935/* if no other valid antenna found, return 0 */
    936static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
    937{
    938	u8 new_ant_type;
    939
    940	if (!rs_is_valid_ant(valid_ant, rate->ant))
    941		return 0;
    942
    943	new_ant_type = ant_toggle_lookup[rate->ant];
    944
    945	while ((new_ant_type != rate->ant) &&
    946	       !rs_is_valid_ant(valid_ant, new_ant_type))
    947		new_ant_type = ant_toggle_lookup[new_ant_type];
    948
    949	if (new_ant_type == rate->ant)
    950		return 0;
    951
    952	rate->ant = new_ant_type;
    953
    954	return 1;
    955}
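/*
 * Example: with valid_ant == ANT_AB and rate->ant == ANT_A the lookup yields
 * ANT_B and the function returns 1; with valid_ant == ANT_A only, the toggle
 * walks back to ANT_A, no alternative is found and it returns 0.
 */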
    956
    957static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
    958				  struct rs_rate *rate)
    959{
    960	if (is_legacy(rate))
    961		return lq_sta->active_legacy_rate;
    962	else if (is_siso(rate))
    963		return lq_sta->active_siso_rate;
    964	else if (is_mimo2(rate))
    965		return lq_sta->active_mimo2_rate;
    966
    967	WARN_ON_ONCE(1);
    968	return 0;
    969}
    970
    971static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
    972				int rate_type)
    973{
    974	u8 high = IWL_RATE_INVALID;
    975	u8 low = IWL_RATE_INVALID;
    976
     977	/* 802.11a or HT walks to the next literal adjacent rate in
    978	 * the rate table */
    979	if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
    980		int i;
    981		u32 mask;
    982
    983		/* Find the previous rate that is in the rate mask */
    984		i = index - 1;
    985		if (i >= 0)
    986			mask = BIT(i);
    987		for (; i >= 0; i--, mask >>= 1) {
    988			if (rate_mask & mask) {
    989				low = i;
    990				break;
    991			}
    992		}
    993
    994		/* Find the next rate that is in the rate mask */
    995		i = index + 1;
    996		for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
    997			if (rate_mask & mask) {
    998				high = i;
    999				break;
   1000			}
   1001		}
   1002
   1003		return (high << 8) | low;
   1004	}
   1005
   1006	low = index;
   1007	while (low != IWL_RATE_INVALID) {
   1008		low = iwl_rates[low].prev_rs;
   1009		if (low == IWL_RATE_INVALID)
   1010			break;
   1011		if (rate_mask & (1 << low))
   1012			break;
   1013	}
   1014
   1015	high = index;
   1016	while (high != IWL_RATE_INVALID) {
   1017		high = iwl_rates[high].next_rs;
   1018		if (high == IWL_RATE_INVALID)
   1019			break;
   1020		if (rate_mask & (1 << high))
   1021			break;
   1022	}
   1023
   1024	return (high << 8) | low;
   1025}
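/*
 * Note: the return value packs both neighbours into a single u16; callers
 * unpack it as low = high_low & 0xff and high = (high_low >> 8) & 0xff (see
 * rs_get_lower_rate_in_column() and rs_get_best_rate() below). A byte equal
 * to IWL_RATE_INVALID means there is no valid neighbour in that direction.
 */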
   1026
   1027static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
   1028				     struct rs_rate *rate)
   1029{
   1030	return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
   1031}
   1032
   1033/* Get the next supported lower rate in the current column.
   1034 * Return true if bottom rate in the current column was reached
   1035 */
   1036static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
   1037					struct rs_rate *rate)
   1038{
   1039	u8 low;
   1040	u16 high_low;
   1041	u16 rate_mask;
   1042	struct iwl_mvm *mvm = lq_sta->pers.drv;
   1043
   1044	rate_mask = rs_get_supported_rates(lq_sta, rate);
   1045	high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
   1046					rate->type);
   1047	low = high_low & 0xff;
   1048
   1049	/* Bottom rate of column reached */
   1050	if (low == IWL_RATE_INVALID)
   1051		return true;
   1052
   1053	rate->index = low;
   1054	return false;
   1055}
   1056
   1057/* Get the next rate to use following a column downgrade */
   1058static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
   1059					  struct rs_rate *rate)
   1060{
   1061	struct iwl_mvm *mvm = lq_sta->pers.drv;
   1062
   1063	if (is_legacy(rate)) {
   1064		/* No column to downgrade from Legacy */
   1065		return;
   1066	} else if (is_siso(rate)) {
   1067		/* Downgrade to Legacy if we were in SISO */
   1068		if (lq_sta->band == NL80211_BAND_5GHZ)
   1069			rate->type = LQ_LEGACY_A;
   1070		else
   1071			rate->type = LQ_LEGACY_G;
   1072
   1073		rate->bw = RATE_MCS_CHAN_WIDTH_20;
   1074
   1075		WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
   1076			     rate->index > IWL_RATE_MCS_9_INDEX);
   1077
   1078		rate->index = rs_ht_to_legacy[rate->index];
   1079		rate->ldpc = false;
   1080	} else {
   1081		/* Downgrade to SISO with same MCS if in MIMO  */
   1082		rate->type = is_vht_mimo2(rate) ?
   1083			LQ_VHT_SISO : LQ_HT_SISO;
   1084	}
   1085
   1086	if (num_of_ant(rate->ant) > 1)
   1087		rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
   1088
   1089	/* Relevant in both switching to SISO or Legacy */
   1090	rate->sgi = false;
   1091
   1092	if (!rs_rate_supported(lq_sta, rate))
   1093		rs_get_lower_rate_in_column(lq_sta, rate);
   1094}
   1095
   1096/* Check if both rates share the same column */
   1097static inline bool rs_rate_column_match(struct rs_rate *a,
   1098					struct rs_rate *b)
   1099{
   1100	bool ant_match;
   1101
   1102	if (a->stbc || a->bfer)
   1103		ant_match = (b->ant == ANT_A || b->ant == ANT_B);
   1104	else
   1105		ant_match = (a->ant == b->ant);
   1106
   1107	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
   1108		&& ant_match;
   1109}
   1110
   1111static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
   1112{
   1113	if (is_legacy(rate)) {
   1114		if (rate->ant == ANT_A)
   1115			return RS_COLUMN_LEGACY_ANT_A;
   1116
   1117		if (rate->ant == ANT_B)
   1118			return RS_COLUMN_LEGACY_ANT_B;
   1119
   1120		goto err;
   1121	}
   1122
   1123	if (is_siso(rate)) {
   1124		if (rate->ant == ANT_A || rate->stbc || rate->bfer)
   1125			return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
   1126				RS_COLUMN_SISO_ANT_A;
   1127
   1128		if (rate->ant == ANT_B)
   1129			return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
   1130				RS_COLUMN_SISO_ANT_B;
   1131
   1132		goto err;
   1133	}
   1134
   1135	if (is_mimo(rate))
   1136		return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
   1137
   1138err:
   1139	return RS_COLUMN_INVALID;
   1140}
   1141
   1142static u8 rs_get_tid(struct ieee80211_hdr *hdr)
   1143{
   1144	u8 tid = IWL_MAX_TID_COUNT;
   1145
   1146	if (ieee80211_is_data_qos(hdr->frame_control)) {
   1147		u8 *qc = ieee80211_get_qos_ctl(hdr);
   1148		tid = qc[0] & 0xf;
   1149	}
   1150
   1151	if (unlikely(tid > IWL_MAX_TID_COUNT))
   1152		tid = IWL_MAX_TID_COUNT;
   1153
   1154	return tid;
   1155}
   1156
   1157/*
   1158 * mac80211 sends us Tx status
   1159 */
   1160static void rs_drv_mac80211_tx_status(void *mvm_r,
   1161				      struct ieee80211_supported_band *sband,
   1162				      struct ieee80211_sta *sta, void *priv_sta,
   1163				      struct sk_buff *skb)
   1164{
   1165	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
   1166	struct iwl_op_mode *op_mode = mvm_r;
   1167	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
   1168	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
   1169	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   1170
   1171	if (!mvmsta->vif)
   1172		return;
   1173
   1174	if (!ieee80211_is_data(hdr->frame_control) ||
   1175	    info->flags & IEEE80211_TX_CTL_NO_ACK)
   1176		return;
   1177
   1178	iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
   1179			     ieee80211_is_qos_nullfunc(hdr->frame_control));
   1180}
   1181
   1182/*
   1183 * Begin a period of staying with a selected modulation mode.
   1184 * Set "stay_in_tbl" flag to prevent any mode switches.
   1185 * Set frame tx success limits according to legacy vs. high-throughput,
   1186 * and reset overall (spanning all rates) tx success history statistics.
   1187 * These control how long we stay using same modulation mode before
   1188 * searching for a new mode.
   1189 */
   1190static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
   1191				 struct iwl_lq_sta *lq_sta)
   1192{
   1193	IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
   1194	lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
   1195	if (is_legacy) {
   1196		lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
   1197		lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
   1198		lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
   1199	} else {
   1200		lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
   1201		lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
   1202		lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
   1203	}
   1204	lq_sta->table_count = 0;
   1205	lq_sta->total_failed = 0;
   1206	lq_sta->total_success = 0;
   1207	lq_sta->flush_timer = jiffies;
   1208	lq_sta->visited_columns = 0;
   1209}
   1210
   1211static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
   1212{
   1213	if (rate_mask)
   1214		return find_last_bit(&rate_mask, BITS_PER_LONG);
   1215	return IWL_RATE_INVALID;
   1216}
   1217
   1218static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
   1219				   const struct rs_tx_column *column)
   1220{
   1221	switch (column->mode) {
   1222	case RS_LEGACY:
   1223		return lq_sta->max_legacy_rate_idx;
   1224	case RS_SISO:
   1225		return lq_sta->max_siso_rate_idx;
   1226	case RS_MIMO2:
   1227		return lq_sta->max_mimo2_rate_idx;
   1228	default:
   1229		WARN_ON_ONCE(1);
   1230	}
   1231
   1232	return lq_sta->max_legacy_rate_idx;
   1233}
   1234
   1235static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
   1236					    const struct rs_tx_column *column,
   1237					    u32 bw)
   1238{
   1239	/* Used to choose among HT tables */
   1240	const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
   1241
   1242	if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
   1243			 column->mode != RS_SISO &&
   1244			 column->mode != RS_MIMO2))
   1245		return expected_tpt_legacy;
   1246
   1247	/* Legacy rates have only one table */
   1248	if (column->mode == RS_LEGACY)
   1249		return expected_tpt_legacy;
   1250
   1251	ht_tbl_pointer = expected_tpt_mimo2_20MHz;
   1252	/* Choose among many HT tables depending on number of streams
    1253	 * (SISO/MIMO2), channel width (20/40/80/160), SGI, and aggregation
   1254	 * status */
   1255	if (column->mode == RS_SISO) {
   1256		switch (bw) {
   1257		case RATE_MCS_CHAN_WIDTH_20:
   1258			ht_tbl_pointer = expected_tpt_siso_20MHz;
   1259			break;
   1260		case RATE_MCS_CHAN_WIDTH_40:
   1261			ht_tbl_pointer = expected_tpt_siso_40MHz;
   1262			break;
   1263		case RATE_MCS_CHAN_WIDTH_80:
   1264			ht_tbl_pointer = expected_tpt_siso_80MHz;
   1265			break;
   1266		case RATE_MCS_CHAN_WIDTH_160:
   1267			ht_tbl_pointer = expected_tpt_siso_160MHz;
   1268			break;
   1269		default:
   1270			WARN_ON_ONCE(1);
   1271		}
   1272	} else if (column->mode == RS_MIMO2) {
   1273		switch (bw) {
   1274		case RATE_MCS_CHAN_WIDTH_20:
   1275			ht_tbl_pointer = expected_tpt_mimo2_20MHz;
   1276			break;
   1277		case RATE_MCS_CHAN_WIDTH_40:
   1278			ht_tbl_pointer = expected_tpt_mimo2_40MHz;
   1279			break;
   1280		case RATE_MCS_CHAN_WIDTH_80:
   1281			ht_tbl_pointer = expected_tpt_mimo2_80MHz;
   1282			break;
   1283		case RATE_MCS_CHAN_WIDTH_160:
   1284			ht_tbl_pointer = expected_tpt_mimo2_160MHz;
   1285			break;
   1286		default:
   1287			WARN_ON_ONCE(1);
   1288		}
   1289	} else {
   1290		WARN_ON_ONCE(1);
   1291	}
   1292
   1293	if (!column->sgi && !lq_sta->is_agg)		/* Normal */
   1294		return ht_tbl_pointer[0];
   1295	else if (column->sgi && !lq_sta->is_agg)        /* SGI */
   1296		return ht_tbl_pointer[1];
   1297	else if (!column->sgi && lq_sta->is_agg)        /* AGG */
   1298		return ht_tbl_pointer[2];
   1299	else						/* AGG+SGI */
   1300		return ht_tbl_pointer[3];
   1301}
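/*
 * For example, a SISO column at 80 MHz with SGI while aggregation is active
 * resolves to expected_tpt_siso_80MHz[3], i.e. the AGG+SGI row in the
 * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI layout documented above the
 * expected TpT tables.
 */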
   1302
   1303static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
   1304				      struct iwl_scale_tbl_info *tbl)
   1305{
   1306	struct rs_rate *rate = &tbl->rate;
   1307	const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
   1308
   1309	tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
   1310}
   1311
   1312/* rs uses two tables, one is active and the second is for searching better
   1313 * configuration. This function, according to the index of the currently
    1314 * active table, returns the search table, which is located at the
   1315 * index complementary to 1 according to the active table (active = 1,
   1316 * search = 0 or active = 0, search = 1).
    1317 * Since lq_info is an array of size 2, make sure the index cannot be out of bounds.
   1318 */
   1319static inline u8 rs_search_tbl(u8 active_tbl)
   1320{
   1321	return (active_tbl ^ 1) & 1;
   1322}
   1323
   1324static s32 rs_get_best_rate(struct iwl_mvm *mvm,
   1325			    struct iwl_lq_sta *lq_sta,
   1326			    struct iwl_scale_tbl_info *tbl,	/* "search" */
   1327			    unsigned long rate_mask, s8 index)
   1328{
   1329	struct iwl_scale_tbl_info *active_tbl =
   1330	    &(lq_sta->lq_info[lq_sta->active_tbl]);
   1331	s32 success_ratio = active_tbl->win[index].success_ratio;
   1332	u16 expected_current_tpt = active_tbl->expected_tpt[index];
   1333	const u16 *tpt_tbl = tbl->expected_tpt;
   1334	u16 high_low;
   1335	u32 target_tpt;
   1336	int rate_idx;
   1337
   1338	if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
   1339		target_tpt = 100 * expected_current_tpt;
   1340		IWL_DEBUG_RATE(mvm,
   1341			       "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
   1342			       success_ratio, target_tpt);
   1343	} else {
   1344		target_tpt = lq_sta->last_tpt;
   1345		IWL_DEBUG_RATE(mvm,
   1346			       "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
   1347			       success_ratio, target_tpt);
   1348	}
   1349
   1350	rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
   1351
   1352	while (rate_idx != IWL_RATE_INVALID) {
   1353		if (target_tpt < (100 * tpt_tbl[rate_idx]))
   1354			break;
   1355
   1356		high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
   1357						tbl->rate.type);
   1358
   1359		rate_idx = (high_low >> 8) & 0xff;
   1360	}
   1361
   1362	IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
   1363		       rate_idx, target_tpt,
   1364		       rate_idx != IWL_RATE_INVALID ?
   1365		       100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
   1366
   1367	return rate_idx;
   1368}
   1369
   1370static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
   1371{
   1372	struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->deflink.vht_cap;
   1373	struct ieee80211_vht_cap vht_cap = {
   1374		.vht_cap_info = cpu_to_le32(sta_vht_cap->cap),
   1375		.supp_mcs = sta_vht_cap->vht_mcs,
   1376	};
   1377
   1378	switch (sta->deflink.bandwidth) {
   1379	case IEEE80211_STA_RX_BW_160:
   1380		/*
   1381		 * Don't use 160 MHz if VHT extended NSS support
   1382		 * says we cannot use 2 streams, we don't want to
    1383		 * says we cannot use 2 streams; we don't want to
   1384		 * We only check MCS 0 - they will support that if
   1385		 * we got here at all and we don't care which MCS,
   1386		 * we want to determine a more global state.
   1387		 */
   1388		if (ieee80211_get_vht_max_nss(&vht_cap,
   1389					      IEEE80211_VHT_CHANWIDTH_160MHZ,
   1390					      0, true,
   1391					      sta->deflink.rx_nss) < sta->deflink.rx_nss)
   1392			return RATE_MCS_CHAN_WIDTH_80;
   1393		return RATE_MCS_CHAN_WIDTH_160;
   1394	case IEEE80211_STA_RX_BW_80:
   1395		return RATE_MCS_CHAN_WIDTH_80;
   1396	case IEEE80211_STA_RX_BW_40:
   1397		return RATE_MCS_CHAN_WIDTH_40;
   1398	case IEEE80211_STA_RX_BW_20:
   1399	default:
   1400		return RATE_MCS_CHAN_WIDTH_20;
   1401	}
   1402}
   1403
   1404/*
   1405 * Check whether we should continue using same modulation mode, or
   1406 * begin search for a new mode, based on:
   1407 * 1) # tx successes or failures while using this mode
   1408 * 2) # times calling this function
   1409 * 3) elapsed time in this mode (not used, for now)
   1410 */
   1411static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
   1412{
   1413	struct iwl_scale_tbl_info *tbl;
   1414	int active_tbl;
   1415	int flush_interval_passed = 0;
   1416	struct iwl_mvm *mvm;
   1417
   1418	mvm = lq_sta->pers.drv;
   1419	active_tbl = lq_sta->active_tbl;
   1420
   1421	tbl = &(lq_sta->lq_info[active_tbl]);
   1422
   1423	/* If we've been disallowing search, see if we should now allow it */
   1424	if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
   1425		/* Elapsed time using current modulation mode */
   1426		if (lq_sta->flush_timer)
   1427			flush_interval_passed =
   1428				time_after(jiffies,
   1429					   (unsigned long)(lq_sta->flush_timer +
   1430							   (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
   1431
   1432		/*
   1433		 * Check if we should allow search for new modulation mode.
   1434		 * If many frames have failed or succeeded, or we've used
   1435		 * this same modulation for a long time, allow search, and
   1436		 * reset history stats that keep track of whether we should
   1437		 * allow a new search.  Also (below) reset all bitmaps and
   1438		 * stats in active history.
   1439		 */
   1440		if (force_search ||
   1441		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
   1442		    (lq_sta->total_success > lq_sta->max_success_limit) ||
   1443		    ((!lq_sta->search_better_tbl) &&
   1444		     (lq_sta->flush_timer) && (flush_interval_passed))) {
   1445			IWL_DEBUG_RATE(mvm,
   1446				       "LQ: stay is expired %d %d %d\n",
   1447				     lq_sta->total_failed,
   1448				     lq_sta->total_success,
   1449				     flush_interval_passed);
   1450
   1451			/* Allow search for new mode */
   1452			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
   1453			IWL_DEBUG_RATE(mvm,
   1454				       "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
   1455			lq_sta->total_failed = 0;
   1456			lq_sta->total_success = 0;
   1457			lq_sta->flush_timer = 0;
   1458			/* mark the current column as visited */
   1459			lq_sta->visited_columns = BIT(tbl->column);
   1460		/*
   1461		 * Else if we've used this modulation mode enough repetitions
   1462		 * (regardless of elapsed time or success/failure), reset
   1463		 * history bitmaps and rate-specific stats for all rates in
   1464		 * active table.
   1465		 */
   1466		} else {
   1467			lq_sta->table_count++;
   1468			if (lq_sta->table_count >=
   1469			    lq_sta->table_count_limit) {
   1470				lq_sta->table_count = 0;
   1471
   1472				IWL_DEBUG_RATE(mvm,
   1473					       "LQ: stay in table clear win\n");
   1474				rs_rate_scale_clear_tbl_windows(mvm, tbl);
   1475			}
   1476		}
   1477
   1478		/* If transitioning to allow "search", reset all history
   1479		 * bitmaps and stats in active table (this will become the new
   1480		 * "search" table). */
   1481		if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
   1482			rs_rate_scale_clear_tbl_windows(mvm, tbl);
   1483		}
   1484	}
   1485}
   1486
   1487static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
   1488			     struct iwl_scale_tbl_info *tbl,
   1489			     enum rs_action scale_action)
   1490{
   1491	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   1492	int i;
   1493
   1494	sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
   1495
   1496	/*
    1497	 * In case TLC offload is not active, amsdu_enabled is either 0xFFFF
    1498	 * or 0, since there is no per-TID algorithm.
   1499	 */
   1500	if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
   1501	    tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
   1502	    scale_action == RS_ACTION_DOWNSCALE)
   1503		mvmsta->amsdu_enabled = 0;
   1504	else
   1505		mvmsta->amsdu_enabled = 0xFFFF;
   1506
   1507	if (mvmsta->vif->bss_conf.he_support &&
   1508	    !iwlwifi_mod_params.disable_11ax)
   1509		mvmsta->max_amsdu_len = sta->max_amsdu_len;
   1510	else
   1511		mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500);
   1512
   1513	sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
   1514
   1515	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
   1516		if (mvmsta->amsdu_enabled)
   1517			sta->max_tid_amsdu_len[i] =
   1518				iwl_mvm_max_amsdu_size(mvm, sta, i);
   1519		else
   1520			/*
   1521			 * Not so elegant, but this will effectively
   1522			 * prevent AMSDU on this TID
   1523			 */
   1524			sta->max_tid_amsdu_len[i] = 1;
   1525	}
   1526}
   1527
   1528/*
   1529 * setup rate table in uCode
   1530 */
   1531static void rs_update_rate_tbl(struct iwl_mvm *mvm,
   1532			       struct ieee80211_sta *sta,
   1533			       struct iwl_lq_sta *lq_sta,
   1534			       struct iwl_scale_tbl_info *tbl)
   1535{
   1536	rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
   1537	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
   1538}
   1539
   1540static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
   1541			      struct ieee80211_sta *sta,
   1542			      struct iwl_lq_sta *lq_sta,
   1543			      struct iwl_scale_tbl_info *tbl,
   1544			      enum rs_action scale_action)
   1545{
   1546	if (rs_bw_from_sta_bw(sta) != RATE_MCS_CHAN_WIDTH_80)
   1547		return false;
   1548
   1549	if (!is_vht_siso(&tbl->rate))
   1550		return false;
   1551
   1552	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
   1553	    (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
   1554	    (scale_action == RS_ACTION_DOWNSCALE)) {
   1555		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
   1556		tbl->rate.index = IWL_RATE_MCS_4_INDEX;
   1557		IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
   1558		goto tweaked;
   1559	}
   1560
   1561	/* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
   1562	 * sustainable, i.e. we're past the test window. We can't go back
    1563	 * if MCS5 was just tested, as this will always happen after switching
   1564	 * to 20Mhz MCS4 because the rate stats are cleared.
   1565	 */
   1566	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
   1567	    (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
   1568	     (scale_action == RS_ACTION_STAY)) ||
   1569	     ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
   1570	      (scale_action == RS_ACTION_UPSCALE)))) {
   1571		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
   1572		tbl->rate.index = IWL_RATE_MCS_1_INDEX;
   1573		IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
   1574		goto tweaked;
   1575	}
   1576
   1577	return false;
   1578
   1579tweaked:
   1580	rs_set_expected_tpt_table(lq_sta, tbl);
   1581	rs_rate_scale_clear_tbl_windows(mvm, tbl);
   1582	return true;
   1583}
   1584
   1585static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
   1586					 struct iwl_lq_sta *lq_sta,
   1587					 struct ieee80211_sta *sta,
   1588					 struct iwl_scale_tbl_info *tbl)
   1589{
   1590	int i, j, max_rate;
   1591	enum rs_column next_col_id;
   1592	const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
   1593	const struct rs_tx_column *next_col;
   1594	allow_column_func_t allow_func;
   1595	u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
   1596	const u16 *expected_tpt_tbl;
   1597	u16 tpt, max_expected_tpt;
   1598
   1599	for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
   1600		next_col_id = curr_col->next_columns[i];
   1601
   1602		if (next_col_id == RS_COLUMN_INVALID)
   1603			continue;
   1604
   1605		if (lq_sta->visited_columns & BIT(next_col_id)) {
   1606			IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
   1607				       next_col_id);
   1608			continue;
   1609		}
   1610
   1611		next_col = &rs_tx_columns[next_col_id];
   1612
   1613		if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
   1614			IWL_DEBUG_RATE(mvm,
   1615				       "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
   1616				       next_col_id, valid_ants, next_col->ant);
   1617			continue;
   1618		}
   1619
   1620		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
   1621			allow_func = next_col->checks[j];
   1622			if (allow_func && !allow_func(mvm, sta, &tbl->rate,
   1623						      next_col))
   1624				break;
   1625		}
   1626
   1627		if (j != MAX_COLUMN_CHECKS) {
   1628			IWL_DEBUG_RATE(mvm,
   1629				       "Skip column %d: not allowed (check %d failed)\n",
   1630				       next_col_id, j);
   1631
   1632			continue;
   1633		}
   1634
   1635		tpt = lq_sta->last_tpt / 100;
   1636		expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
   1637						     rs_bw_from_sta_bw(sta));
   1638		if (WARN_ON_ONCE(!expected_tpt_tbl))
   1639			continue;
   1640
   1641		max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
   1642		if (max_rate == IWL_RATE_INVALID) {
   1643			IWL_DEBUG_RATE(mvm,
   1644				       "Skip column %d: no rate is allowed in this column\n",
   1645				       next_col_id);
   1646			continue;
   1647		}
   1648
   1649		max_expected_tpt = expected_tpt_tbl[max_rate];
   1650		if (tpt >= max_expected_tpt) {
   1651			IWL_DEBUG_RATE(mvm,
   1652				       "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
   1653				       next_col_id, max_expected_tpt, tpt);
   1654			continue;
   1655		}
   1656
   1657		IWL_DEBUG_RATE(mvm,
   1658			       "Found potential column %d. Max expected %d current %d\n",
   1659			       next_col_id, max_expected_tpt, tpt);
   1660		break;
   1661	}
   1662
   1663	if (i == MAX_NEXT_COLUMNS)
   1664		return RS_COLUMN_INVALID;
   1665
   1666	return next_col_id;
   1667}
   1668
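/*
 * Prepare the "search" table for the given column: copy the active
 * table (up to the history windows), apply the column's antenna/SGI/
 * modulation settings and the matching rate mask, and mark the column
 * as visited. When the modulation mode changes (e.g. SISO <-> MIMO or
 * legacy <-> SISO), pick the best starting rate for the new mode; if no
 * allowed rate is found the switch fails and -1 is returned.
 */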
   1669static int rs_switch_to_column(struct iwl_mvm *mvm,
   1670			       struct iwl_lq_sta *lq_sta,
   1671			       struct ieee80211_sta *sta,
   1672			       enum rs_column col_id)
   1673{
   1674	struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
   1675	struct iwl_scale_tbl_info *search_tbl =
   1676		&lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
   1677	struct rs_rate *rate = &search_tbl->rate;
   1678	const struct rs_tx_column *column = &rs_tx_columns[col_id];
   1679	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
   1680	unsigned long rate_mask = 0;
   1681	u32 rate_idx = 0;
   1682
   1683	memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
   1684
   1685	rate->sgi = column->sgi;
   1686	rate->ant = column->ant;
   1687
   1688	if (column->mode == RS_LEGACY) {
   1689		if (lq_sta->band == NL80211_BAND_5GHZ)
   1690			rate->type = LQ_LEGACY_A;
   1691		else
   1692			rate->type = LQ_LEGACY_G;
   1693
   1694		rate->bw = RATE_MCS_CHAN_WIDTH_20;
   1695		rate->ldpc = false;
   1696		rate_mask = lq_sta->active_legacy_rate;
   1697	} else if (column->mode == RS_SISO) {
   1698		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
   1699		rate_mask = lq_sta->active_siso_rate;
   1700	} else if (column->mode == RS_MIMO2) {
   1701		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
   1702		rate_mask = lq_sta->active_mimo2_rate;
   1703	} else {
   1704		WARN_ONCE(1, "Bad column mode");
   1705	}
   1706
   1707	if (column->mode != RS_LEGACY) {
   1708		rate->bw = rs_bw_from_sta_bw(sta);
   1709		rate->ldpc = lq_sta->ldpc;
   1710	}
   1711
   1712	search_tbl->column = col_id;
   1713	rs_set_expected_tpt_table(lq_sta, search_tbl);
   1714
   1715	lq_sta->visited_columns |= BIT(col_id);
   1716
   1717	/* Get the best matching rate if we're changing modes. e.g.
   1718	 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
   1719	 */
   1720	if (curr_column->mode != column->mode) {
   1721		rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
   1722					    rate_mask, rate->index);
   1723
   1724		if ((rate_idx == IWL_RATE_INVALID) ||
   1725		    !(BIT(rate_idx) & rate_mask)) {
   1726			IWL_DEBUG_RATE(mvm,
   1727				       "can not switch with index %d"
   1728				       " rate mask %lx\n",
   1729				       rate_idx, rate_mask);
   1730
   1731			goto err;
   1732		}
   1733
   1734		rate->index = rate_idx;
   1735	}
   1736
   1737	IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
   1738		       col_id, rate->index);
   1739
   1740	return 0;
   1741
   1742err:
   1743	rate->type = LQ_NONE;
   1744	return -1;
   1745}
   1746
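/*
 * Decide whether to upscale, downscale or stay, based on the measured
 * success ratio (sr) and the throughput seen at the current, next lower
 * and next higher rates. Roughly: force a downscale on a very low
 * success ratio, upscale when the higher rate is unexplored or already
 * measured to be better, and downscale when the lower rate looks better
 * or is unexplored. A downscale decision is then cancelled if the
 * success ratio is high enough or the current throughput already beats
 * the best that the lower rate could be expected to deliver.
 */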
   1747static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
   1748					 struct iwl_scale_tbl_info *tbl,
   1749					 s32 sr, int low, int high,
   1750					 int current_tpt,
   1751					 int low_tpt, int high_tpt)
   1752{
   1753	enum rs_action action = RS_ACTION_STAY;
   1754
   1755	if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
   1756	    (current_tpt == 0)) {
   1757		IWL_DEBUG_RATE(mvm,
   1758			       "Decrease rate because of low SR\n");
   1759		return RS_ACTION_DOWNSCALE;
   1760	}
   1761
   1762	if ((low_tpt == IWL_INVALID_VALUE) &&
   1763	    (high_tpt == IWL_INVALID_VALUE) &&
   1764	    (high != IWL_RATE_INVALID)) {
   1765		IWL_DEBUG_RATE(mvm,
   1766			       "No data about high/low rates. Increase rate\n");
   1767		return RS_ACTION_UPSCALE;
   1768	}
   1769
   1770	if ((high_tpt == IWL_INVALID_VALUE) &&
   1771	    (high != IWL_RATE_INVALID) &&
   1772	    (low_tpt != IWL_INVALID_VALUE) &&
   1773	    (low_tpt < current_tpt)) {
   1774		IWL_DEBUG_RATE(mvm,
   1775			       "No data about high rate and low rate is worse. Increase rate\n");
   1776		return RS_ACTION_UPSCALE;
   1777	}
   1778
   1779	if ((high_tpt != IWL_INVALID_VALUE) &&
   1780	    (high_tpt > current_tpt)) {
   1781		IWL_DEBUG_RATE(mvm,
    1782			       "Higher rate is better. Increase rate\n");
   1783		return RS_ACTION_UPSCALE;
   1784	}
   1785
   1786	if ((low_tpt != IWL_INVALID_VALUE) &&
   1787	    (high_tpt != IWL_INVALID_VALUE) &&
   1788	    (low_tpt < current_tpt) &&
   1789	    (high_tpt < current_tpt)) {
   1790		IWL_DEBUG_RATE(mvm,
   1791			       "Both high and low are worse. Maintain rate\n");
   1792		return RS_ACTION_STAY;
   1793	}
   1794
   1795	if ((low_tpt != IWL_INVALID_VALUE) &&
   1796	    (low_tpt > current_tpt)) {
   1797		IWL_DEBUG_RATE(mvm,
   1798			       "Lower rate is better\n");
   1799		action = RS_ACTION_DOWNSCALE;
   1800		goto out;
   1801	}
   1802
   1803	if ((low_tpt == IWL_INVALID_VALUE) &&
   1804	    (low != IWL_RATE_INVALID)) {
   1805		IWL_DEBUG_RATE(mvm,
   1806			       "No data about lower rate\n");
   1807		action = RS_ACTION_DOWNSCALE;
   1808		goto out;
   1809	}
   1810
   1811	IWL_DEBUG_RATE(mvm, "Maintain rate\n");
   1812
   1813out:
   1814	if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
   1815		if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
   1816			IWL_DEBUG_RATE(mvm,
   1817				       "SR is above NO DECREASE. Avoid downscale\n");
   1818			action = RS_ACTION_STAY;
   1819		} else if (current_tpt > (100 * tbl->expected_tpt[low])) {
   1820			IWL_DEBUG_RATE(mvm,
   1821				       "Current TPT is higher than max expected in low rate. Avoid downscale\n");
   1822			action = RS_ACTION_STAY;
   1823		} else {
   1824			IWL_DEBUG_RATE(mvm, "Decrease rate\n");
   1825		}
   1826	}
   1827
   1828	return action;
   1829}
   1830
   1831static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
   1832			  struct iwl_lq_sta *lq_sta)
   1833{
   1834	/* Our chip supports Tx STBC and the peer is an HT/VHT STA which
   1835	 * supports STBC of at least 1*SS
   1836	 */
   1837	if (!lq_sta->stbc_capable)
   1838		return false;
   1839
   1840	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
   1841		return false;
   1842
   1843	return true;
   1844}
   1845
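/*
 * Tx power reduction (TPC) works on an index into the tpc_win history:
 * a larger index means more power reduction. "Weaker" therefore is one
 * step further down in Tx power and "stronger" one step back up, with
 * TPC_INVALID marking that the corresponding edge has been reached.
 */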
   1846static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
   1847				int *weaker, int *stronger)
   1848{
   1849	*weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
   1850	if (*weaker > TPC_MAX_REDUCTION)
   1851		*weaker = TPC_INVALID;
   1852
   1853	*stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
   1854	if (*stronger < 0)
   1855		*stronger = TPC_INVALID;
   1856}
   1857
   1858static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
   1859			   struct rs_rate *rate, enum nl80211_band band)
   1860{
   1861	int index = rate->index;
   1862	bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
   1863	bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
   1864				!vif->bss_conf.ps);
   1865
   1866	IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
   1867		       cam, sta_ps_disabled);
   1868	/*
   1869	 * allow tpc only if power management is enabled, or bt coex
   1870	 * activity grade allows it and we are on 2.4Ghz.
   1871	 */
   1872	if ((cam || sta_ps_disabled) &&
   1873	    !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
   1874		return false;
   1875
   1876	IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
   1877	if (is_legacy(rate))
   1878		return index == IWL_RATE_54M_INDEX;
   1879	if (is_ht(rate))
   1880		return index == IWL_RATE_MCS_7_INDEX;
   1881	if (is_vht(rate))
   1882		return index == IWL_RATE_MCS_9_INDEX;
   1883
   1884	WARN_ON_ONCE(1);
   1885	return false;
   1886}
   1887
   1888enum tpc_action {
   1889	TPC_ACTION_STAY,
   1890	TPC_ACTION_DECREASE,
   1891	TPC_ACTION_INCREASE,
    1892	TPC_ACTION_NO_RESTRICTION,
   1893};
   1894
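/*
 * Decide how to adjust the Tx power reduction. In order of priority:
 * stay while there is no throughput data yet, drop all power
 * restrictions when the success ratio is very low, try to reduce power
 * further when the success ratio is high and the weaker setting is
 * either unexplored or already measured to do better, and increase
 * power when the stronger setting is known (or likely) to perform
 * better. Otherwise keep the current reduction.
 */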
   1895static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
   1896					 s32 sr, int weak, int strong,
   1897					 int current_tpt,
   1898					 int weak_tpt, int strong_tpt)
   1899{
   1900	/* stay until we have valid tpt */
   1901	if (current_tpt == IWL_INVALID_VALUE) {
   1902		IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
   1903		return TPC_ACTION_STAY;
   1904	}
   1905
   1906	/* Too many failures, increase txp */
   1907	if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
   1908	    current_tpt == 0) {
   1909		IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
    1910		return TPC_ACTION_NO_RESTRICTION;
   1911	}
   1912
   1913	/* try decreasing first if applicable */
   1914	if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
   1915	    weak != TPC_INVALID) {
   1916		if (weak_tpt == IWL_INVALID_VALUE &&
   1917		    (strong_tpt == IWL_INVALID_VALUE ||
   1918		     current_tpt >= strong_tpt)) {
   1919			IWL_DEBUG_RATE(mvm,
   1920				       "no weak txp measurement. decrease txp\n");
   1921			return TPC_ACTION_DECREASE;
   1922		}
   1923
   1924		if (weak_tpt > current_tpt) {
   1925			IWL_DEBUG_RATE(mvm,
   1926				       "lower txp has better tpt. decrease txp\n");
   1927			return TPC_ACTION_DECREASE;
   1928		}
   1929	}
   1930
   1931	/* next, increase if needed */
   1932	if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
   1933	    strong != TPC_INVALID) {
   1934		if (weak_tpt == IWL_INVALID_VALUE &&
   1935		    strong_tpt != IWL_INVALID_VALUE &&
   1936		    current_tpt < strong_tpt) {
   1937			IWL_DEBUG_RATE(mvm,
   1938				       "higher txp has better tpt. increase txp\n");
   1939			return TPC_ACTION_INCREASE;
   1940		}
   1941
   1942		if (weak_tpt < current_tpt &&
   1943		    (strong_tpt == IWL_INVALID_VALUE ||
   1944		     strong_tpt > current_tpt)) {
   1945			IWL_DEBUG_RATE(mvm,
   1946				       "lower txp has worse tpt. increase txp\n");
   1947			return TPC_ACTION_INCREASE;
   1948		}
   1949	}
   1950
   1951	IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
   1952	return TPC_ACTION_STAY;
   1953}
   1954
   1955static bool rs_tpc_perform(struct iwl_mvm *mvm,
   1956			   struct ieee80211_sta *sta,
   1957			   struct iwl_lq_sta *lq_sta,
   1958			   struct iwl_scale_tbl_info *tbl)
   1959{
   1960	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
   1961	struct ieee80211_vif *vif = mvm_sta->vif;
   1962	struct ieee80211_chanctx_conf *chanctx_conf;
   1963	enum nl80211_band band;
   1964	struct iwl_rate_scale_data *window;
   1965	struct rs_rate *rate = &tbl->rate;
   1966	enum tpc_action action;
   1967	s32 sr;
   1968	u8 cur = lq_sta->lq.reduced_tpc;
   1969	int current_tpt;
   1970	int weak, strong;
   1971	int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
   1972
   1973#ifdef CONFIG_MAC80211_DEBUGFS
   1974	if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
   1975		IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
   1976			       lq_sta->pers.dbg_fixed_txp_reduction);
   1977		lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
   1978		return cur != lq_sta->pers.dbg_fixed_txp_reduction;
   1979	}
   1980#endif
   1981
   1982	rcu_read_lock();
   1983	chanctx_conf = rcu_dereference(vif->chanctx_conf);
   1984	if (WARN_ON(!chanctx_conf))
   1985		band = NUM_NL80211_BANDS;
   1986	else
   1987		band = chanctx_conf->def.chan->band;
   1988	rcu_read_unlock();
   1989
   1990	if (!rs_tpc_allowed(mvm, vif, rate, band)) {
   1991		IWL_DEBUG_RATE(mvm,
   1992			       "tpc is not allowed. remove txp restrictions\n");
   1993		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
   1994		return cur != TPC_NO_REDUCTION;
   1995	}
   1996
   1997	rs_get_adjacent_txp(mvm, cur, &weak, &strong);
   1998
   1999	/* Collect measured throughputs for current and adjacent rates */
   2000	window = tbl->tpc_win;
   2001	sr = window[cur].success_ratio;
   2002	current_tpt = window[cur].average_tpt;
   2003	if (weak != TPC_INVALID)
   2004		weak_tpt = window[weak].average_tpt;
   2005	if (strong != TPC_INVALID)
   2006		strong_tpt = window[strong].average_tpt;
   2007
   2008	IWL_DEBUG_RATE(mvm,
   2009		       "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
   2010		       cur, current_tpt, sr, weak, strong,
   2011		       weak_tpt, strong_tpt);
   2012
   2013	action = rs_get_tpc_action(mvm, sr, weak, strong,
   2014				   current_tpt, weak_tpt, strong_tpt);
   2015
   2016	/* override actions if we are on the edge */
   2017	if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
   2018		IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
   2019		action = TPC_ACTION_STAY;
   2020	} else if (strong == TPC_INVALID &&
   2021		   (action == TPC_ACTION_INCREASE ||
    2022		    action == TPC_ACTION_NO_RESTRICTION)) {
   2023		IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
   2024		action = TPC_ACTION_STAY;
   2025	}
   2026
   2027	switch (action) {
   2028	case TPC_ACTION_DECREASE:
   2029		lq_sta->lq.reduced_tpc = weak;
   2030		return true;
   2031	case TPC_ACTION_INCREASE:
   2032		lq_sta->lq.reduced_tpc = strong;
   2033		return true;
    2034	case TPC_ACTION_NO_RESTRICTION:
   2035		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
   2036		return true;
   2037	case TPC_ACTION_STAY:
   2038		/* do nothing */
   2039		break;
   2040	}
   2041	return false;
   2042}
   2043
   2044/*
   2045 * Do rate scaling and search for new modulation mode.
   2046 */
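/*
 * Flow: pick the "active" or "search" table, make sure the history
 * window for the current rate holds enough data, then either judge an
 * ongoing column search (keep the new column if it improved throughput,
 * otherwise revert), or compare the current rate against its neighbours
 * and upscale/downscale/stay accordingly. If nothing changed and a
 * search cycle is in progress, try switching to the next column.
 * Finally, possibly enable aggregation and settle into "stay in table"
 * mode once the search cycle has ended.
 */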
   2047static void rs_rate_scale_perform(struct iwl_mvm *mvm,
   2048				  struct ieee80211_sta *sta,
   2049				  struct iwl_lq_sta *lq_sta,
   2050				  int tid, bool ndp)
   2051{
   2052	int low = IWL_RATE_INVALID;
   2053	int high = IWL_RATE_INVALID;
   2054	int index;
   2055	struct iwl_rate_scale_data *window = NULL;
   2056	int current_tpt = IWL_INVALID_VALUE;
   2057	int low_tpt = IWL_INVALID_VALUE;
   2058	int high_tpt = IWL_INVALID_VALUE;
   2059	u32 fail_count;
   2060	enum rs_action scale_action = RS_ACTION_STAY;
   2061	u16 rate_mask;
   2062	u8 update_lq = 0;
   2063	struct iwl_scale_tbl_info *tbl, *tbl1;
   2064	u8 active_tbl = 0;
   2065	u8 done_search = 0;
   2066	u16 high_low;
   2067	s32 sr;
   2068	u8 prev_agg = lq_sta->is_agg;
   2069	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   2070	struct rs_rate *rate;
   2071
   2072	lq_sta->is_agg = !!mvmsta->agg_tids;
   2073
   2074	/*
   2075	 * Select rate-scale / modulation-mode table to work with in
   2076	 * the rest of this function:  "search" if searching for better
   2077	 * modulation mode, or "active" if doing rate scaling within a mode.
   2078	 */
   2079	if (!lq_sta->search_better_tbl)
   2080		active_tbl = lq_sta->active_tbl;
   2081	else
   2082		active_tbl = rs_search_tbl(lq_sta->active_tbl);
   2083
   2084	tbl = &(lq_sta->lq_info[active_tbl]);
   2085	rate = &tbl->rate;
   2086
   2087	if (prev_agg != lq_sta->is_agg) {
   2088		IWL_DEBUG_RATE(mvm,
   2089			       "Aggregation changed: prev %d current %d. Update expected TPT table\n",
   2090			       prev_agg, lq_sta->is_agg);
   2091		rs_set_expected_tpt_table(lq_sta, tbl);
   2092		rs_rate_scale_clear_tbl_windows(mvm, tbl);
   2093	}
   2094
   2095	/* current tx rate */
   2096	index = rate->index;
   2097
   2098	/* rates available for this association, and for modulation mode */
   2099	rate_mask = rs_get_supported_rates(lq_sta, rate);
   2100
   2101	if (!(BIT(index) & rate_mask)) {
   2102		IWL_ERR(mvm, "Current Rate is not valid\n");
   2103		if (lq_sta->search_better_tbl) {
    2104			/* revert to active table if search table is not valid */
   2105			rate->type = LQ_NONE;
   2106			lq_sta->search_better_tbl = 0;
   2107			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
   2108			rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
   2109		}
   2110		return;
   2111	}
   2112
   2113	/* Get expected throughput table and history window for current rate */
   2114	if (!tbl->expected_tpt) {
   2115		IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
   2116		return;
   2117	}
   2118
   2119	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
   2120	window = &(tbl->win[index]);
   2121
   2122	/*
   2123	 * If there is not enough history to calculate actual average
   2124	 * throughput, keep analyzing results of more tx frames, without
   2125	 * changing rate or mode (bypass most of the rest of this function).
   2126	 * Set up new rate table in uCode only if old rate is not supported
   2127	 * in current association (use new rate found above).
   2128	 */
   2129	fail_count = window->counter - window->success_counter;
   2130	if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
   2131	    (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
   2132		IWL_DEBUG_RATE(mvm,
   2133			       "%s: Test Window: succ %d total %d\n",
   2134			       rs_pretty_rate(rate),
   2135			       window->success_counter, window->counter);
   2136
   2137		/* Can't calculate this yet; not enough history */
   2138		window->average_tpt = IWL_INVALID_VALUE;
   2139
   2140		/* Should we stay with this modulation mode,
   2141		 * or search for a new one? */
   2142		rs_stay_in_table(lq_sta, false);
   2143
   2144		return;
   2145	}
   2146
   2147	/* If we are searching for better modulation mode, check success. */
   2148	if (lq_sta->search_better_tbl) {
   2149		/* If good success, continue using the "search" mode;
   2150		 * no need to send new link quality command, since we're
   2151		 * continuing to use the setup that we've been trying. */
   2152		if (window->average_tpt > lq_sta->last_tpt) {
   2153			IWL_DEBUG_RATE(mvm,
   2154				       "SWITCHING TO NEW TABLE SR: %d "
   2155				       "cur-tpt %d old-tpt %d\n",
   2156				       window->success_ratio,
   2157				       window->average_tpt,
   2158				       lq_sta->last_tpt);
   2159
   2160			/* Swap tables; "search" becomes "active" */
   2161			lq_sta->active_tbl = active_tbl;
   2162			current_tpt = window->average_tpt;
   2163		/* Else poor success; go back to mode in "active" table */
   2164		} else {
   2165			IWL_DEBUG_RATE(mvm,
   2166				       "GOING BACK TO THE OLD TABLE: SR %d "
   2167				       "cur-tpt %d old-tpt %d\n",
   2168				       window->success_ratio,
   2169				       window->average_tpt,
   2170				       lq_sta->last_tpt);
   2171
   2172			/* Nullify "search" table */
   2173			rate->type = LQ_NONE;
   2174
   2175			/* Revert to "active" table */
   2176			active_tbl = lq_sta->active_tbl;
   2177			tbl = &(lq_sta->lq_info[active_tbl]);
   2178
   2179			/* Revert to "active" rate and throughput info */
   2180			index = tbl->rate.index;
   2181			current_tpt = lq_sta->last_tpt;
   2182
   2183			/* Need to set up a new rate table in uCode */
   2184			update_lq = 1;
   2185		}
   2186
   2187		/* Either way, we've made a decision; modulation mode
   2188		 * search is done, allow rate adjustment next time. */
   2189		lq_sta->search_better_tbl = 0;
   2190		done_search = 1;	/* Don't switch modes below! */
   2191		goto lq_update;
   2192	}
   2193
   2194	/* (Else) not in search of better modulation mode, try for better
   2195	 * starting rate, while staying in this mode. */
   2196	high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
   2197	low = high_low & 0xff;
   2198	high = (high_low >> 8) & 0xff;
   2199
   2200	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
   2201
   2202	sr = window->success_ratio;
   2203
   2204	/* Collect measured throughputs for current and adjacent rates */
   2205	current_tpt = window->average_tpt;
   2206	if (low != IWL_RATE_INVALID)
   2207		low_tpt = tbl->win[low].average_tpt;
   2208	if (high != IWL_RATE_INVALID)
   2209		high_tpt = tbl->win[high].average_tpt;
   2210
   2211	IWL_DEBUG_RATE(mvm,
   2212		       "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
   2213		       rs_pretty_rate(rate), current_tpt, sr,
   2214		       low, high, low_tpt, high_tpt);
   2215
   2216	scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
   2217					  current_tpt, low_tpt, high_tpt);
   2218
   2219	/* Force a search in case BT doesn't like us being in MIMO */
   2220	if (is_mimo(rate) &&
   2221	    !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
   2222		IWL_DEBUG_RATE(mvm,
   2223			       "BT Coex forbids MIMO. Search for new config\n");
   2224		rs_stay_in_table(lq_sta, true);
   2225		goto lq_update;
   2226	}
   2227
   2228	switch (scale_action) {
   2229	case RS_ACTION_DOWNSCALE:
   2230		/* Decrease starting rate, update uCode's rate table */
   2231		if (low != IWL_RATE_INVALID) {
   2232			update_lq = 1;
   2233			index = low;
   2234		} else {
   2235			IWL_DEBUG_RATE(mvm,
   2236				       "At the bottom rate. Can't decrease\n");
   2237		}
   2238
   2239		break;
   2240	case RS_ACTION_UPSCALE:
   2241		/* Increase starting rate, update uCode's rate table */
   2242		if (high != IWL_RATE_INVALID) {
   2243			update_lq = 1;
   2244			index = high;
   2245		} else {
   2246			IWL_DEBUG_RATE(mvm,
   2247				       "At the top rate. Can't increase\n");
   2248		}
   2249
   2250		break;
   2251	case RS_ACTION_STAY:
   2252		/* No change */
   2253		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
   2254			update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
   2255		break;
   2256	default:
   2257		break;
   2258	}
   2259
   2260lq_update:
   2261	/* Replace uCode's rate table for the destination station. */
   2262	if (update_lq) {
   2263		tbl->rate.index = index;
   2264		if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
   2265			rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
   2266		rs_set_amsdu_len(mvm, sta, tbl, scale_action);
   2267		rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
   2268	}
   2269
   2270	rs_stay_in_table(lq_sta, false);
   2271
   2272	/*
   2273	 * Search for new modulation mode if we're:
   2274	 * 1)  Not changing rates right now
   2275	 * 2)  Not just finishing up a search
   2276	 * 3)  Allowing a new search
   2277	 */
   2278	if (!update_lq && !done_search &&
   2279	    lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
   2280	    && window->counter) {
   2281		enum rs_column next_column;
   2282
    2283		/* Save current throughput to compare with "search" throughput */
   2284		lq_sta->last_tpt = current_tpt;
   2285
   2286		IWL_DEBUG_RATE(mvm,
   2287			       "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
   2288			       update_lq, done_search, lq_sta->rs_state,
   2289			       window->counter);
   2290
   2291		next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
   2292		if (next_column != RS_COLUMN_INVALID) {
   2293			int ret = rs_switch_to_column(mvm, lq_sta, sta,
   2294						      next_column);
   2295			if (!ret)
   2296				lq_sta->search_better_tbl = 1;
   2297		} else {
   2298			IWL_DEBUG_RATE(mvm,
   2299				       "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
   2300			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
   2301		}
   2302
   2303		/* If new "search" mode was selected, set up in uCode table */
   2304		if (lq_sta->search_better_tbl) {
   2305			/* Access the "search" table, clear its history. */
   2306			tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
   2307			rs_rate_scale_clear_tbl_windows(mvm, tbl);
   2308
   2309			/* Use new "search" start rate */
   2310			index = tbl->rate.index;
   2311
   2312			rs_dump_rate(mvm, &tbl->rate,
   2313				     "Switch to SEARCH TABLE:");
   2314			rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
   2315		} else {
   2316			done_search = 1;
   2317		}
   2318	}
   2319
   2320	if (!ndp)
   2321		rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta);
   2322
   2323	if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
   2324		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
   2325		rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta);
   2326	}
   2327}
   2328
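/*
 * The tables below map a measured RSSI to a reasonable starting rate.
 * Entries are ordered from the highest rate down; the first entry whose
 * RSSI threshold is met (and whose rate is allowed by the relevant rate
 * mask) is used, so the S8_MIN entry acts as the catch-all lowest rate.
 */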
   2329struct rs_init_rate_info {
   2330	s8 rssi;
   2331	u8 rate_idx;
   2332};
   2333
   2334static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
   2335	{ -60, IWL_RATE_54M_INDEX },
   2336	{ -64, IWL_RATE_48M_INDEX },
   2337	{ -68, IWL_RATE_36M_INDEX },
   2338	{ -80, IWL_RATE_24M_INDEX },
   2339	{ -84, IWL_RATE_18M_INDEX },
   2340	{ -85, IWL_RATE_12M_INDEX },
   2341	{ -86, IWL_RATE_11M_INDEX },
   2342	{ -88, IWL_RATE_5M_INDEX  },
   2343	{ -90, IWL_RATE_2M_INDEX  },
   2344	{ S8_MIN, IWL_RATE_1M_INDEX },
   2345};
   2346
   2347static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
   2348	{ -60, IWL_RATE_54M_INDEX },
   2349	{ -64, IWL_RATE_48M_INDEX },
   2350	{ -72, IWL_RATE_36M_INDEX },
   2351	{ -80, IWL_RATE_24M_INDEX },
   2352	{ -84, IWL_RATE_18M_INDEX },
   2353	{ -85, IWL_RATE_12M_INDEX },
   2354	{ -87, IWL_RATE_9M_INDEX  },
   2355	{ S8_MIN, IWL_RATE_6M_INDEX },
   2356};
   2357
   2358static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
   2359	{ -60, IWL_RATE_MCS_7_INDEX },
   2360	{ -64, IWL_RATE_MCS_6_INDEX },
   2361	{ -68, IWL_RATE_MCS_5_INDEX },
   2362	{ -72, IWL_RATE_MCS_4_INDEX },
   2363	{ -80, IWL_RATE_MCS_3_INDEX },
   2364	{ -84, IWL_RATE_MCS_2_INDEX },
   2365	{ -85, IWL_RATE_MCS_1_INDEX },
   2366	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
   2367};
   2368
   2369/* MCS index 9 is not valid for 20MHz VHT channel width,
   2370 * but is ok for 40, 80 and 160MHz channels.
   2371 */
   2372static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
   2373	{ -60, IWL_RATE_MCS_8_INDEX },
   2374	{ -64, IWL_RATE_MCS_7_INDEX },
   2375	{ -68, IWL_RATE_MCS_6_INDEX },
   2376	{ -72, IWL_RATE_MCS_5_INDEX },
   2377	{ -80, IWL_RATE_MCS_4_INDEX },
   2378	{ -84, IWL_RATE_MCS_3_INDEX },
   2379	{ -85, IWL_RATE_MCS_2_INDEX },
   2380	{ -87, IWL_RATE_MCS_1_INDEX },
   2381	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
   2382};
   2383
   2384static const struct rs_init_rate_info rs_optimal_rates_vht[] = {
   2385	{ -60, IWL_RATE_MCS_9_INDEX },
   2386	{ -64, IWL_RATE_MCS_8_INDEX },
   2387	{ -68, IWL_RATE_MCS_7_INDEX },
   2388	{ -72, IWL_RATE_MCS_6_INDEX },
   2389	{ -80, IWL_RATE_MCS_5_INDEX },
   2390	{ -84, IWL_RATE_MCS_4_INDEX },
   2391	{ -85, IWL_RATE_MCS_3_INDEX },
   2392	{ -87, IWL_RATE_MCS_2_INDEX },
   2393	{ -88, IWL_RATE_MCS_1_INDEX },
   2394	{ S8_MIN, IWL_RATE_MCS_0_INDEX },
   2395};
   2396
   2397#define IWL_RS_LOW_RSSI_THRESHOLD (-76) /* dBm */
   2398
   2399/* Init the optimal rate based on STA caps
   2400 * This combined with rssi is used to report the last tx rate
   2401 * to userspace when we haven't transmitted enough frames.
   2402 */
   2403static void rs_init_optimal_rate(struct iwl_mvm *mvm,
   2404				 struct ieee80211_sta *sta,
   2405				 struct iwl_lq_sta *lq_sta)
   2406{
   2407	struct rs_rate *rate = &lq_sta->optimal_rate;
   2408
   2409	if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
   2410		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
   2411	else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
   2412		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
   2413	else if (lq_sta->band == NL80211_BAND_5GHZ)
   2414		rate->type = LQ_LEGACY_A;
   2415	else
   2416		rate->type = LQ_LEGACY_G;
   2417
   2418	rate->bw = rs_bw_from_sta_bw(sta);
   2419	rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
   2420
   2421	/* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
   2422
   2423	if (is_mimo(rate)) {
   2424		lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
   2425	} else if (is_siso(rate)) {
   2426		lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
   2427	} else {
   2428		lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
   2429
   2430		if (lq_sta->band == NL80211_BAND_5GHZ) {
   2431			lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
   2432			lq_sta->optimal_nentries =
   2433				ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
   2434		} else {
   2435			lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
   2436			lq_sta->optimal_nentries =
   2437				ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
   2438		}
   2439	}
   2440
   2441	if (is_vht(rate)) {
   2442		if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
   2443			lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
   2444			lq_sta->optimal_nentries =
   2445				ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
   2446		} else {
   2447			lq_sta->optimal_rates = rs_optimal_rates_vht;
   2448			lq_sta->optimal_nentries =
   2449				ARRAY_SIZE(rs_optimal_rates_vht);
   2450		}
   2451	} else if (is_ht(rate)) {
   2452		lq_sta->optimal_rates = rs_optimal_rates_ht;
   2453		lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
   2454	}
   2455}
   2456
   2457/* Compute the optimal rate index based on RSSI */
   2458static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
   2459					   struct iwl_lq_sta *lq_sta)
   2460{
   2461	struct rs_rate *rate = &lq_sta->optimal_rate;
   2462	int i;
   2463
   2464	rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
   2465				     BITS_PER_LONG);
   2466
   2467	for (i = 0; i < lq_sta->optimal_nentries; i++) {
   2468		int rate_idx = lq_sta->optimal_rates[i].rate_idx;
   2469
   2470		if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
   2471		    (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
   2472			rate->index = rate_idx;
   2473			break;
   2474		}
   2475	}
   2476
   2477	return rate;
   2478}
   2479
   2480/* Choose an initial legacy rate and antenna to use based on the RSSI
   2481 * of last Rx
   2482 */
   2483static void rs_get_initial_rate(struct iwl_mvm *mvm,
   2484				struct ieee80211_sta *sta,
   2485				struct iwl_lq_sta *lq_sta,
   2486				enum nl80211_band band,
   2487				struct rs_rate *rate)
   2488{
   2489	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   2490	int i, nentries;
   2491	unsigned long active_rate;
   2492	s8 best_rssi = S8_MIN;
   2493	u8 best_ant = ANT_NONE;
   2494	u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
   2495	const struct rs_init_rate_info *initial_rates;
   2496
   2497	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
   2498		if (!(lq_sta->pers.chains & BIT(i)))
   2499			continue;
   2500
   2501		if (lq_sta->pers.chain_signal[i] > best_rssi) {
   2502			best_rssi = lq_sta->pers.chain_signal[i];
   2503			best_ant = BIT(i);
   2504		}
   2505	}
   2506
   2507	IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
   2508		       iwl_rs_pretty_ant(best_ant), best_rssi);
   2509
   2510	if (best_ant != ANT_A && best_ant != ANT_B)
   2511		rate->ant = first_antenna(valid_tx_ant);
   2512	else
   2513		rate->ant = best_ant;
   2514
   2515	rate->sgi = false;
   2516	rate->ldpc = false;
   2517	rate->bw = RATE_MCS_CHAN_WIDTH_20;
   2518
   2519	rate->index = find_first_bit(&lq_sta->active_legacy_rate,
   2520				     BITS_PER_LONG);
   2521
   2522	if (band == NL80211_BAND_5GHZ) {
   2523		rate->type = LQ_LEGACY_A;
   2524		initial_rates = rs_optimal_rates_5ghz_legacy;
   2525		nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
   2526	} else {
   2527		rate->type = LQ_LEGACY_G;
   2528		initial_rates = rs_optimal_rates_24ghz_legacy;
   2529		nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
   2530	}
   2531
   2532	if (!IWL_MVM_RS_RSSI_BASED_INIT_RATE)
   2533		goto out;
   2534
   2535	/* Start from a higher rate if the corresponding debug capability
   2536	 * is enabled. The rate is chosen according to AP capabilities.
    2537	 * In the VHT/HT case, when the RSSI is low, fall back to
    2538	 * legacy rates.
   2539	 */
   2540	if (sta->deflink.vht_cap.vht_supported &&
   2541	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
   2542		/*
   2543		 * In AP mode, when a new station associates, rs is initialized
   2544		 * immediately upon association completion, before the phy
   2545		 * context is updated with the association parameters, so the
   2546		 * sta bandwidth might be wider than the phy context allows.
   2547		 * To avoid this issue, always initialize rs with 20mhz
   2548		 * bandwidth rate, and after authorization, when the phy context
   2549		 * is already up-to-date, re-init rs with the correct bw.
   2550		 */
   2551		u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ?
   2552				RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
   2553
   2554		switch (bw) {
   2555		case RATE_MCS_CHAN_WIDTH_40:
   2556		case RATE_MCS_CHAN_WIDTH_80:
   2557		case RATE_MCS_CHAN_WIDTH_160:
   2558			initial_rates = rs_optimal_rates_vht;
   2559			nentries = ARRAY_SIZE(rs_optimal_rates_vht);
   2560			break;
   2561		case RATE_MCS_CHAN_WIDTH_20:
   2562			initial_rates = rs_optimal_rates_vht_20mhz;
   2563			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
   2564			break;
   2565		default:
   2566			IWL_ERR(mvm, "Invalid BW %d\n",
   2567				sta->deflink.bandwidth);
   2568			goto out;
   2569		}
   2570
   2571		active_rate = lq_sta->active_siso_rate;
   2572		rate->type = LQ_VHT_SISO;
   2573		rate->bw = bw;
   2574	} else if (sta->deflink.ht_cap.ht_supported &&
   2575		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
   2576		initial_rates = rs_optimal_rates_ht;
   2577		nentries = ARRAY_SIZE(rs_optimal_rates_ht);
   2578		active_rate = lq_sta->active_siso_rate;
   2579		rate->type = LQ_HT_SISO;
   2580	} else {
   2581		active_rate = lq_sta->active_legacy_rate;
   2582	}
   2583
   2584	for (i = 0; i < nentries; i++) {
   2585		int rate_idx = initial_rates[i].rate_idx;
   2586
   2587		if ((best_rssi >= initial_rates[i].rssi) &&
   2588		    (BIT(rate_idx) & active_rate)) {
   2589			rate->index = rate_idx;
   2590			break;
   2591		}
   2592	}
   2593
   2594out:
   2595	rs_dump_rate(mvm, rate, "INITIAL");
   2596}
   2597
   2598/* Save info about RSSI of last Rx */
   2599void rs_update_last_rssi(struct iwl_mvm *mvm,
   2600			 struct iwl_mvm_sta *mvmsta,
   2601			 struct ieee80211_rx_status *rx_status)
   2602{
   2603	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
   2604	int i;
   2605
   2606	lq_sta->pers.chains = rx_status->chains;
   2607	lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
   2608	lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
   2609	lq_sta->pers.last_rssi = S8_MIN;
   2610
   2611	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
   2612		if (!(lq_sta->pers.chains & BIT(i)))
   2613			continue;
   2614
   2615		if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
   2616			lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
   2617	}
   2618}
   2619
   2620/*
   2621 * rs_initialize_lq - Initialize a station's hardware rate table
   2622 *
   2623 * The uCode's station table contains a table of fallback rates
   2624 * for automatic fallback during transmission.
   2625 *
   2626 * NOTE: This sets up a default set of values.  These will be replaced later
   2627 *       if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
   2628 *       rc80211_simple.
   2629 *
   2630 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
   2631 *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
   2632 *       which requires station table entry to exist).
   2633 */
   2634static void rs_initialize_lq(struct iwl_mvm *mvm,
   2635			     struct ieee80211_sta *sta,
   2636			     struct iwl_lq_sta *lq_sta,
   2637			     enum nl80211_band band)
   2638{
   2639	struct iwl_scale_tbl_info *tbl;
   2640	struct rs_rate *rate;
   2641	u8 active_tbl = 0;
   2642
   2643	if (!sta || !lq_sta)
   2644		return;
   2645
   2646	if (!lq_sta->search_better_tbl)
   2647		active_tbl = lq_sta->active_tbl;
   2648	else
   2649		active_tbl = rs_search_tbl(lq_sta->active_tbl);
   2650
   2651	tbl = &(lq_sta->lq_info[active_tbl]);
   2652	rate = &tbl->rate;
   2653
   2654	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
   2655	rs_init_optimal_rate(mvm, sta, lq_sta);
   2656
   2657	WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
   2658		  "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n",
   2659		  rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant,
   2660		  mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID);
   2661
   2662	tbl->column = rs_get_column_from_rate(rate);
   2663
   2664	rs_set_expected_tpt_table(lq_sta, tbl);
   2665	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
   2666	/* TODO restore station should remember the lq cmd */
   2667	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
   2668}
   2669
   2670static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
   2671			    void *mvm_sta,
   2672			    struct ieee80211_tx_rate_control *txrc)
   2673{
   2674	struct iwl_op_mode *op_mode = mvm_r;
   2675	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
   2676	struct sk_buff *skb = txrc->skb;
   2677	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
   2678	struct iwl_lq_sta *lq_sta;
   2679	struct rs_rate *optimal_rate;
   2680	u32 last_ucode_rate;
   2681
   2682	if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
   2683		/* if vif isn't initialized mvm doesn't know about
    2684		 * this station, so don't do anything with it
   2685		 */
   2686		sta = NULL;
   2687		mvm_sta = NULL;
   2688	}
   2689
   2690	if (!mvm_sta)
   2691		return;
   2692
   2693	lq_sta = mvm_sta;
   2694	iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
   2695				     info->band, &info->control.rates[0]);
   2696	info->control.rates[0].count = 1;
   2697
   2698	/* Report the optimal rate based on rssi and STA caps if we haven't
    2699	 * converged yet (too little traffic) or are still exploring other modulations
   2700	 */
   2701	if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
   2702		optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
   2703		last_ucode_rate = ucode_rate_from_rs_rate(mvm,
   2704							  optimal_rate);
   2705		iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
   2706					     &txrc->reported_rate);
   2707	}
   2708}
   2709
   2710static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
   2711			      gfp_t gfp)
   2712{
   2713	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   2714	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
   2715	struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
   2716	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
   2717
   2718	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
   2719
   2720	lq_sta->pers.drv = mvm;
   2721#ifdef CONFIG_MAC80211_DEBUGFS
   2722	lq_sta->pers.dbg_fixed_rate = 0;
   2723	lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
   2724	lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
   2725#endif
   2726	lq_sta->pers.chains = 0;
   2727	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
   2728	lq_sta->pers.last_rssi = S8_MIN;
   2729
   2730	return lq_sta;
   2731}
   2732
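/*
 * Extract the highest Rx MCS the peer supports for the given number of
 * spatial streams from the VHT rx_mcs_map: each NSS gets a 2-bit field,
 * where 0/1/2 mean MCS 0-7/0-8/0-9 and 3 means the NSS is unsupported.
 * As an illustration, a map of 0xfffa advertises MCS 0-9 for both one
 * and two streams and nothing beyond that.
 */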
   2733static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
   2734				       int nss)
   2735{
   2736	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
   2737		(0x3 << (2 * (nss - 1)));
   2738	rx_mcs >>= (2 * (nss - 1));
   2739
   2740	if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
   2741		return IWL_RATE_MCS_7_INDEX;
   2742	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
   2743		return IWL_RATE_MCS_8_INDEX;
   2744	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
   2745		return IWL_RATE_MCS_9_INDEX;
   2746
   2747	WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
   2748	return -1;
   2749}
   2750
   2751static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
   2752				     struct ieee80211_sta_vht_cap *vht_cap,
   2753				     struct iwl_lq_sta *lq_sta)
   2754{
   2755	int i;
   2756	int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
   2757
   2758	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
   2759		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
   2760			if (i == IWL_RATE_9M_INDEX)
   2761				continue;
   2762
   2763			/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
   2764			if (i == IWL_RATE_MCS_9_INDEX &&
   2765			    sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
   2766				continue;
   2767
   2768			lq_sta->active_siso_rate |= BIT(i);
   2769		}
   2770	}
   2771
   2772	if (sta->deflink.rx_nss < 2)
   2773		return;
   2774
   2775	highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
   2776	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
   2777		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
   2778			if (i == IWL_RATE_9M_INDEX)
   2779				continue;
   2780
   2781			/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
   2782			if (i == IWL_RATE_MCS_9_INDEX &&
   2783			    sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
   2784				continue;
   2785
   2786			lq_sta->active_mimo2_rate |= BIT(i);
   2787		}
   2788	}
   2789}
   2790
   2791static void rs_ht_init(struct iwl_mvm *mvm,
   2792		       struct ieee80211_sta *sta,
   2793		       struct iwl_lq_sta *lq_sta,
   2794		       struct ieee80211_sta_ht_cap *ht_cap)
   2795{
   2796	/* active_siso_rate mask includes 9 MBits (bit 5),
   2797	 * and CCK (bits 0-3), supp_rates[] does not;
   2798	 * shift to convert format, force 9 MBits off.
   2799	 */
   2800	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
   2801	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
   2802	lq_sta->active_siso_rate &= ~((u16)0x2);
   2803	lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
   2804
   2805	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
   2806	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
   2807	lq_sta->active_mimo2_rate &= ~((u16)0x2);
   2808	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
   2809
   2810	if (mvm->cfg->ht_params->ldpc &&
   2811	    (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
   2812		lq_sta->ldpc = true;
   2813
   2814	if (mvm->cfg->ht_params->stbc &&
   2815	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
   2816	    (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
   2817		lq_sta->stbc_capable = true;
   2818
   2819	lq_sta->is_vht = false;
   2820}
   2821
   2822static void rs_vht_init(struct iwl_mvm *mvm,
   2823			struct ieee80211_sta *sta,
   2824			struct iwl_lq_sta *lq_sta,
   2825			struct ieee80211_sta_vht_cap *vht_cap)
   2826{
   2827	rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
   2828
   2829	if (mvm->cfg->ht_params->ldpc &&
   2830	    (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
   2831		lq_sta->ldpc = true;
   2832
   2833	if (mvm->cfg->ht_params->stbc &&
   2834	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
   2835	    (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
   2836		lq_sta->stbc_capable = true;
   2837
   2838	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
   2839	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
   2840	    (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
   2841		lq_sta->bfer_capable = true;
   2842
   2843	lq_sta->is_vht = true;
   2844}
   2845
   2846#ifdef CONFIG_IWLWIFI_DEBUGFS
   2847void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
   2848{
   2849	spin_lock_bh(&mvm->drv_stats_lock);
   2850	memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
   2851	spin_unlock_bh(&mvm->drv_stats_lock);
   2852}
   2853
   2854void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
   2855{
   2856	u8 nss = 0;
   2857
   2858	spin_lock(&mvm->drv_stats_lock);
   2859
   2860	if (agg)
   2861		mvm->drv_rx_stats.agg_frames++;
   2862
   2863	mvm->drv_rx_stats.success_frames++;
   2864
   2865	switch (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) {
   2866	case RATE_MCS_CHAN_WIDTH_20:
   2867		mvm->drv_rx_stats.bw_20_frames++;
   2868		break;
   2869	case RATE_MCS_CHAN_WIDTH_40:
   2870		mvm->drv_rx_stats.bw_40_frames++;
   2871		break;
   2872	case RATE_MCS_CHAN_WIDTH_80:
   2873		mvm->drv_rx_stats.bw_80_frames++;
   2874		break;
   2875	case RATE_MCS_CHAN_WIDTH_160:
   2876		mvm->drv_rx_stats.bw_160_frames++;
   2877		break;
   2878	default:
   2879		WARN_ONCE(1, "bad BW. rate 0x%x", rate);
   2880	}
   2881
   2882	if (rate & RATE_MCS_HT_MSK_V1) {
   2883		mvm->drv_rx_stats.ht_frames++;
   2884		nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1;
   2885	} else if (rate & RATE_MCS_VHT_MSK_V1) {
   2886		mvm->drv_rx_stats.vht_frames++;
   2887		nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
   2888		       RATE_VHT_MCS_NSS_POS) + 1;
   2889	} else {
   2890		mvm->drv_rx_stats.legacy_frames++;
   2891	}
   2892
   2893	if (nss == 1)
   2894		mvm->drv_rx_stats.siso_frames++;
   2895	else if (nss == 2)
   2896		mvm->drv_rx_stats.mimo2_frames++;
   2897
   2898	if (rate & RATE_MCS_SGI_MSK_V1)
   2899		mvm->drv_rx_stats.sgi_frames++;
   2900	else
   2901		mvm->drv_rx_stats.ngi_frames++;
   2902
   2903	mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
   2904	mvm->drv_rx_stats.last_frame_idx =
   2905		(mvm->drv_rx_stats.last_frame_idx + 1) %
   2906			ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
   2907
   2908	spin_unlock(&mvm->drv_stats_lock);
   2909}
   2910#endif
   2911
   2912/*
   2913 * Called after adding a new station to initialize rate scaling
   2914 */
   2915static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
   2916			     enum nl80211_band band)
   2917{
   2918	int i, j;
   2919	struct ieee80211_hw *hw = mvm->hw;
   2920	struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
   2921	struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
   2922	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   2923	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
   2924	struct ieee80211_supported_band *sband;
   2925	unsigned long supp; /* must be unsigned long for for_each_set_bit */
   2926
   2927	lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock);
   2928
   2929	/* clear all non-persistent lq data */
   2930	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
   2931
   2932	sband = hw->wiphy->bands[band];
   2933
   2934	lq_sta->lq.sta_id = mvmsta->sta_id;
   2935	mvmsta->amsdu_enabled = 0;
   2936	mvmsta->max_amsdu_len = sta->max_amsdu_len;
   2937
   2938	for (j = 0; j < LQ_SIZE; j++)
   2939		rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
   2940
   2941	lq_sta->flush_timer = 0;
   2942	lq_sta->last_tx = jiffies;
   2943
   2944	IWL_DEBUG_RATE(mvm,
   2945		       "LQ: *** rate scale station global init for station %d ***\n",
   2946		       mvmsta->sta_id);
   2947	/* TODO: what is a good starting rate for STA? About middle? Maybe not
   2948	 * the lowest or the highest rate.. Could consider using RSSI from
   2949	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
   2950	 * after assoc.. */
   2951
   2952	lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
   2953	lq_sta->band = sband->band;
   2954	/*
   2955	 * active legacy rates as per supported rates bitmap
   2956	 */
   2957	supp = sta->deflink.supp_rates[sband->band];
   2958	lq_sta->active_legacy_rate = 0;
   2959	for_each_set_bit(i, &supp, BITS_PER_LONG)
   2960		lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
   2961
   2962	/* TODO: should probably account for rx_highest for both HT/VHT */
   2963	if (!vht_cap || !vht_cap->vht_supported)
   2964		rs_ht_init(mvm, sta, lq_sta, ht_cap);
   2965	else
   2966		rs_vht_init(mvm, sta, lq_sta, vht_cap);
   2967
   2968	lq_sta->max_legacy_rate_idx =
   2969		rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
   2970	lq_sta->max_siso_rate_idx =
   2971		rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
   2972	lq_sta->max_mimo2_rate_idx =
   2973		rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
   2974
   2975	IWL_DEBUG_RATE(mvm,
   2976		       "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
   2977		       lq_sta->active_legacy_rate,
   2978		       lq_sta->active_siso_rate,
   2979		       lq_sta->active_mimo2_rate,
   2980		       lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
   2981		       lq_sta->bfer_capable);
   2982	IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
   2983		       lq_sta->max_legacy_rate_idx,
   2984		       lq_sta->max_siso_rate_idx,
   2985		       lq_sta->max_mimo2_rate_idx);
   2986
   2987	/* These values will be overridden later */
   2988	lq_sta->lq.single_stream_ant_msk =
   2989		iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm));
   2990	lq_sta->lq.dual_stream_ant_msk = ANT_AB;
   2991
   2992	/* as default allow aggregation for all tids */
   2993	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
   2994	lq_sta->is_agg = 0;
   2995#ifdef CONFIG_IWLWIFI_DEBUGFS
   2996	iwl_mvm_reset_frame_stats(mvm);
   2997#endif
   2998	rs_initialize_lq(mvm, sta, lq_sta, band);
   2999}
   3000
   3001static void rs_drv_rate_update(void *mvm_r,
   3002			       struct ieee80211_supported_band *sband,
   3003			       struct cfg80211_chan_def *chandef,
   3004			       struct ieee80211_sta *sta,
   3005			       void *priv_sta, u32 changed)
   3006{
   3007	struct iwl_op_mode *op_mode = mvm_r;
   3008	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
   3009	u8 tid;
   3010
   3011	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
   3012		return;
   3013
   3014	/* Stop any ongoing aggregations as rs starts off assuming no agg */
   3015	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
   3016		ieee80211_stop_tx_ba_session(sta, tid);
   3017
   3018	iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
   3019}
   3020
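/*
 * Process a Tx status notification: frames that were aggregated but
 * carry no status, and stale status whose LQ "color" no longer matches
 * the latest link quality command, are dropped (too many mismatches
 * trigger a resync of the LQ command). Otherwise the per-rate TLC and
 * TPC history windows are updated - in one shot for an A-MPDU, or once
 * per retry step for a legacy frame - and rate scaling is then run to
 * see if a better rate or column should be tried.
 */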
   3021static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
   3022				   struct ieee80211_sta *sta,
   3023				   int tid, struct ieee80211_tx_info *info,
   3024				   bool ndp)
   3025{
   3026	int legacy_success;
   3027	int retries;
   3028	int i;
   3029	struct iwl_lq_cmd *table;
   3030	u32 lq_hwrate;
   3031	struct rs_rate lq_rate, tx_resp_rate;
   3032	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
   3033	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
   3034	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
   3035	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
   3036	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
   3037	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   3038	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
   3039
   3040	if (!lq_sta->pers.drv) {
   3041		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
   3042		return;
   3043	}
   3044
   3045	/* This packet was aggregated but doesn't carry status info */
   3046	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
   3047	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
   3048		return;
   3049
   3050	if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
   3051				    &tx_resp_rate)) {
   3052		WARN_ON_ONCE(1);
   3053		return;
   3054	}
   3055
   3056#ifdef CONFIG_MAC80211_DEBUGFS
   3057	/* Disable last tx check if we are debugging with fixed rate but
   3058	 * update tx stats
   3059	 */
   3060	if (lq_sta->pers.dbg_fixed_rate) {
   3061		int index = tx_resp_rate.index;
   3062		enum rs_column column;
   3063		int attempts, success;
   3064
   3065		column = rs_get_column_from_rate(&tx_resp_rate);
   3066		if (WARN_ONCE(column == RS_COLUMN_INVALID,
   3067			      "Can't map rate 0x%x to column",
   3068			      tx_resp_hwrate))
   3069			return;
   3070
   3071		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
   3072			attempts = info->status.ampdu_len;
   3073			success = info->status.ampdu_ack_len;
   3074		} else {
   3075			attempts = info->status.rates[0].count;
   3076			success = !!(info->flags & IEEE80211_TX_STAT_ACK);
   3077		}
   3078
   3079		lq_sta->pers.tx_stats[column][index].total += attempts;
   3080		lq_sta->pers.tx_stats[column][index].success += success;
   3081
   3082		IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
   3083			       tx_resp_hwrate, success, attempts);
   3084		return;
   3085	}
   3086#endif
   3087
   3088	if (time_after(jiffies,
   3089		       (unsigned long)(lq_sta->last_tx +
   3090				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
   3091		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
    3092		/* we only reach here for driver RS, so call the
    3093		 * unlocked version directly
   3094		 */
   3095		rs_drv_rate_init(mvm, sta, info->band);
   3096		return;
   3097	}
   3098	lq_sta->last_tx = jiffies;
   3099
   3100	/* Ignore this Tx frame response if its initial rate doesn't match
   3101	 * that of latest Link Quality command.  There may be stragglers
   3102	 * from a previous Link Quality command, but we're no longer interested
   3103	 * in those; they're either from the "active" mode while we're trying
   3104	 * to check "search" mode, or a prior "search" mode after we've moved
   3105	 * to a new "search" mode (which might become the new "active" mode).
   3106	 */
   3107	table = &lq_sta->lq;
   3108	lq_hwrate = le32_to_cpu(table->rs_table[0]);
   3109	if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
   3110		WARN_ON_ONCE(1);
   3111		return;
   3112	}
   3113
   3114	/* Here we actually compare this rate to the latest LQ command */
   3115	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
   3116		IWL_DEBUG_RATE(mvm,
   3117			       "tx resp color 0x%x does not match 0x%x\n",
   3118			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
   3119
   3120		/* Since rates mis-match, the last LQ command may have failed.
   3121		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
   3122		 * ... driver.
   3123		 */
   3124		lq_sta->missed_rate_counter++;
   3125		if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
   3126			lq_sta->missed_rate_counter = 0;
   3127			IWL_DEBUG_RATE(mvm,
   3128				       "Too many rates mismatch. Send sync LQ. rs_state %d\n",
   3129				       lq_sta->rs_state);
   3130			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
   3131		}
   3132		/* Regardless, ignore this status info for outdated rate */
   3133		return;
   3134	}
   3135
   3136	/* Rate did match, so reset the missed_rate_counter */
   3137	lq_sta->missed_rate_counter = 0;
   3138
   3139	if (!lq_sta->search_better_tbl) {
   3140		curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
   3141		other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
   3142	} else {
   3143		curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
   3144		other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
   3145	}
   3146
   3147	if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
   3148		IWL_DEBUG_RATE(mvm,
   3149			       "Neither active nor search matches tx rate\n");
   3150		tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
   3151		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
   3152		tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
   3153		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
   3154		rs_dump_rate(mvm, &lq_rate, "ACTUAL");
   3155
   3156		/* no matching table found, let's by-pass the data collection
    3157		 * and continue to perform rate scaling to find the rate table
   3158		 */
   3159		rs_stay_in_table(lq_sta, true);
   3160		goto done;
   3161	}
   3162
   3163	/* Updating the frame history depends on whether packets were
   3164	 * aggregated.
   3165	 *
   3166	 * For aggregation, all packets were transmitted at the same rate, the
   3167	 * first index into rate scale table.
   3168	 */
   3169	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
   3170		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
   3171				    info->status.ampdu_len,
   3172				    info->status.ampdu_ack_len,
   3173				    reduced_txp);
   3174
    3175		/* ampdu_ack_len = 0 means that no BA was received. For TLC, treat
   3176		 * it as a single frame loss as we don't want the success ratio
   3177		 * to dip too quickly because a BA wasn't received.
   3178		 * For TPC, there's no need for this optimisation since we want
   3179		 * to recover very quickly from a bad power reduction and,
   3180		 * therefore we'd like the success ratio to get an immediate hit
   3181		 * when failing to get a BA, so we'd switch back to a lower or
   3182		 * zero power reduction. When FW transmits agg with a rate
   3183		 * different from the initial rate, it will not use reduced txp
   3184		 * and will send BA notification twice (one empty with reduced
   3185		 * txp equal to the value from LQ and one with reduced txp 0).
   3186		 * We need to update counters for each txp level accordingly.
   3187		 */
   3188		if (info->status.ampdu_ack_len == 0)
   3189			info->status.ampdu_len = 1;
   3190
   3191		rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
   3192				    tx_resp_rate.index,
   3193				    info->status.ampdu_len,
   3194				    info->status.ampdu_ack_len);
   3195
   3196		/* Update success/fail counts if not searching for new mode */
   3197		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
   3198			lq_sta->total_success += info->status.ampdu_ack_len;
   3199			lq_sta->total_failed += (info->status.ampdu_len -
   3200					info->status.ampdu_ack_len);
   3201		}
   3202	} else {
    3203		/* For legacy, update frame history for each Tx retry. */
   3204		retries = info->status.rates[0].count - 1;
   3205		/* HW doesn't send more than 15 retries */
   3206		retries = min(retries, 15);
   3207
   3208		/* The last transmission may have been successful */
   3209		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
   3210		/* Collect data for each rate used during failed TX attempts */
   3211		for (i = 0; i <= retries; ++i) {
   3212			lq_hwrate = le32_to_cpu(table->rs_table[i]);
   3213			if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
   3214						    &lq_rate)) {
   3215				WARN_ON_ONCE(1);
   3216				return;
   3217			}
   3218
   3219			/* Only collect stats if retried rate is in the same RS
   3220			 * table as active/search.
   3221			 */
   3222			if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
   3223				tmp_tbl = curr_tbl;
   3224			else if (rs_rate_column_match(&lq_rate,
   3225						      &other_tbl->rate))
   3226				tmp_tbl = other_tbl;
   3227			else
   3228				continue;
   3229
   3230			rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
   3231					    tx_resp_rate.index, 1,
   3232					    i < retries ? 0 : legacy_success,
   3233					    reduced_txp);
   3234			rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
   3235					    tx_resp_rate.index, 1,
   3236					    i < retries ? 0 : legacy_success);
   3237		}
   3238
   3239		/* Update success/fail counts if not searching for new mode */
   3240		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
   3241			lq_sta->total_success += legacy_success;
   3242			lq_sta->total_failed += retries + (1 - legacy_success);
   3243		}
   3244	}
   3245	/* The last TX rate is cached in lq_sta; it's set in if/else above */
   3246	lq_sta->last_rate_n_flags = lq_hwrate;
   3247	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
   3248done:
   3249	/* See if there's a better rate or modulation mode to try. */
   3250	if (sta->deflink.supp_rates[info->band])
   3251		rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
   3252}
   3253
   3254void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
   3255			  int tid, struct ieee80211_tx_info *info, bool ndp)
   3256{
   3257	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   3258
   3259	/* If it's locked we are in middle of init flow
   3260	 * just wait for next tx status to update the lq_sta data
   3261	 */
   3262	if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock))
   3263		return;
   3264
   3265	__iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
   3266	spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
   3267}
   3268
   3269#ifdef CONFIG_MAC80211_DEBUGFS
   3270static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
   3271					    struct iwl_lq_cmd *lq_cmd,
   3272					    enum nl80211_band band,
   3273					    u32 ucode_rate)
   3274{
   3275	struct rs_rate rate;
   3276	int i;
   3277	int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
   3278	__le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
   3279	u8 ant = (ucode_rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
   3280
   3281	for (i = 0; i < num_rates; i++)
   3282		lq_cmd->rs_table[i] = ucode_rate_le32;
   3283
   3284	if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
   3285		WARN_ON_ONCE(1);
   3286		return;
   3287	}
   3288
   3289	if (is_mimo(&rate))
   3290		lq_cmd->mimo_delim = num_rates - 1;
   3291	else
   3292		lq_cmd->mimo_delim = 0;
   3293
   3294	lq_cmd->reduced_tpc = 0;
   3295
   3296	if (num_of_ant(ant) == 1)
   3297		lq_cmd->single_stream_ant_msk = ant;
   3298
   3299	if (!mvm->trans->trans_cfg->gen2)
   3300		lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
   3301	else
   3302		lq_cmd->agg_frame_cnt_limit =
   3303			LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
   3304}
   3305#endif /* CONFIG_MAC80211_DEBUGFS */
   3306
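/* Fill consecutive entries of rs_table, starting at *rs_table_index, from
 * the current column: each rate is written num_retries times (toggling the
 * Tx antenna after each entry when toggle_ant is set) before stepping down
 * to the next lower rate in the column. Filling stops once num_rates rates
 * were written, the table is full, or (for HT/VHT columns) the bottom of
 * the column was reached. *rs_table_index is updated to the next free slot.
 */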
   3307static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
   3308				     struct iwl_lq_sta *lq_sta,
   3309				     struct rs_rate *rate,
   3310				     __le32 *rs_table, int *rs_table_index,
   3311				     int num_rates, int num_retries,
   3312				     u8 valid_tx_ant, bool toggle_ant)
   3313{
   3314	int i, j;
   3315	__le32 ucode_rate;
   3316	bool bottom_reached = false;
   3317	int prev_rate_idx = rate->index;
   3318	int end = LINK_QUAL_MAX_RETRY_NUM;
   3319	int index = *rs_table_index;
   3320
   3321	for (i = 0; i < num_rates && index < end; i++) {
   3322		for (j = 0; j < num_retries && index < end; j++, index++) {
   3323			ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
   3324									 rate));
   3325			rs_table[index] = ucode_rate;
   3326			if (toggle_ant)
   3327				rs_toggle_antenna(valid_tx_ant, rate);
   3328		}
   3329
   3330		prev_rate_idx = rate->index;
   3331		bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
   3332		if (bottom_reached && !is_legacy(rate))
   3333			break;
   3334	}
   3335
   3336	if (!bottom_reached && !is_legacy(rate))
   3337		rate->index = prev_rate_idx;
   3338
   3339	*rs_table_index = index;
   3340}
   3341
    3342/* Building the rate table is non-trivial. When we're in a MIMO2/VHT/80MHz/SGI
    3343 * column, the rate table should look like this:
   3344 *
   3345 * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
   3346 * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
   3347 * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
   3348 * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
   3349 * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
   3350 * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
   3351 * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
   3352 * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
   3353 * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
   3354 * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
   3355 * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
   3356 * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
   3357 * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
   3358 * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
   3359 * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
   3360 * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
   3361 */
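/* The layout above is produced in up to three passes by the function below:
 * the initial column (SISO, MIMO2 or legacy) built around initial_rate, then
 * a secondary column one step down (SISO after MIMO2, or legacy after SISO),
 * and finally a legacy column as the last resort. Antenna toggling is
 * enabled from the second pass on (and already in the first pass for a
 * legacy initial column).
 */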
   3362static void rs_build_rates_table(struct iwl_mvm *mvm,
   3363				 struct ieee80211_sta *sta,
   3364				 struct iwl_lq_sta *lq_sta,
   3365				 const struct rs_rate *initial_rate)
   3366{
   3367	struct rs_rate rate;
   3368	int num_rates, num_retries, index = 0;
   3369	u8 valid_tx_ant = 0;
   3370	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
   3371	bool toggle_ant = false;
   3372	u32 color;
   3373
   3374	memcpy(&rate, initial_rate, sizeof(rate));
   3375
   3376	valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
   3377
   3378	/* TODO: remove old API when min FW API hits 14 */
   3379	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
   3380	    rs_stbc_allow(mvm, sta, lq_sta))
   3381		rate.stbc = true;
   3382
   3383	if (is_siso(&rate)) {
   3384		num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
   3385		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
   3386	} else if (is_mimo(&rate)) {
   3387		num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
   3388		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
   3389	} else {
   3390		num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
   3391		num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
   3392		toggle_ant = true;
   3393	}
   3394
   3395	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
   3396				 num_rates, num_retries, valid_tx_ant,
   3397				 toggle_ant);
   3398
   3399	rs_get_lower_rate_down_column(lq_sta, &rate);
   3400
   3401	if (is_siso(&rate)) {
   3402		num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
   3403		num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
   3404		lq_cmd->mimo_delim = index;
   3405	} else if (is_legacy(&rate)) {
   3406		num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
   3407		num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
   3408	} else {
   3409		WARN_ON_ONCE(1);
   3410	}
   3411
   3412	toggle_ant = true;
   3413
   3414	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
   3415				 num_rates, num_retries, valid_tx_ant,
   3416				 toggle_ant);
   3417
   3418	rs_get_lower_rate_down_column(lq_sta, &rate);
   3419
   3420	num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
   3421	num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
   3422
   3423	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
   3424				 num_rates, num_retries, valid_tx_ant,
   3425				 toggle_ant);
   3426
   3427	/* update the color of the LQ command (as a counter at bits 1-3) */
   3428	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
   3429	lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
   3430}
   3431
   3432struct rs_bfer_active_iter_data {
   3433	struct ieee80211_sta *exclude_sta;
   3434	struct iwl_mvm_sta *bfer_mvmsta;
   3435};
   3436
   3437static void rs_bfer_active_iter(void *_data,
   3438				struct ieee80211_sta *sta)
   3439{
   3440	struct rs_bfer_active_iter_data *data = _data;
   3441	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   3442	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
   3443	u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
   3444
   3445	if (sta == data->exclude_sta)
   3446		return;
   3447
   3448	/* The current sta has BFER allowed */
   3449	if (ss_params & LQ_SS_BFER_ALLOWED) {
   3450		WARN_ON_ONCE(data->bfer_mvmsta != NULL);
   3451
   3452		data->bfer_mvmsta = mvmsta;
   3453	}
   3454}
   3455
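/* Map the interface type to a BFER priority: AP/P2P GO rank highest, then
 * P2P client, then station. rs_set_lq_ss_params() uses this ranking to
 * decide which single station gets to keep LQ_SS_BFER_ALLOWED when more
 * than one could use it.
 */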
   3456static int rs_bfer_priority(struct iwl_mvm_sta *sta)
   3457{
   3458	int prio = -1;
   3459	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
   3460
   3461	switch (viftype) {
   3462	case NL80211_IFTYPE_AP:
   3463	case NL80211_IFTYPE_P2P_GO:
   3464		prio = 3;
   3465		break;
   3466	case NL80211_IFTYPE_P2P_CLIENT:
   3467		prio = 2;
   3468		break;
   3469	case NL80211_IFTYPE_STATION:
   3470		prio = 1;
   3471		break;
   3472	default:
   3473		WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
   3474		prio = -1;
   3475	}
   3476
   3477	return prio;
   3478}
   3479
   3480/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
   3481static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
   3482				struct iwl_mvm_sta *sta2)
   3483{
   3484	int prio1 = rs_bfer_priority(sta1);
   3485	int prio2 = rs_bfer_priority(sta2);
   3486
   3487	if (prio1 > prio2)
   3488		return 1;
   3489	if (prio1 < prio2)
   3490		return -1;
   3491	return 0;
   3492}
   3493
   3494static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
   3495				struct ieee80211_sta *sta,
   3496				struct iwl_lq_sta *lq_sta,
   3497				const struct rs_rate *initial_rate)
   3498{
   3499	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
   3500	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   3501	struct rs_bfer_active_iter_data data = {
   3502		.exclude_sta = sta,
   3503		.bfer_mvmsta = NULL,
   3504	};
   3505	struct iwl_mvm_sta *bfer_mvmsta = NULL;
   3506	u32 ss_params = LQ_SS_PARAMS_VALID;
   3507
   3508	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
   3509		goto out;
   3510
   3511#ifdef CONFIG_MAC80211_DEBUGFS
   3512	/* Check if forcing the decision is configured.
   3513	 * Note that SISO is forced by not allowing STBC or BFER
   3514	 */
   3515	if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
   3516		ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
   3517	else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
   3518		ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
   3519
   3520	if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
   3521		IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
   3522			       lq_sta->pers.ss_force);
   3523		goto out;
   3524	}
   3525#endif
   3526
   3527	if (lq_sta->stbc_capable)
   3528		ss_params |= LQ_SS_STBC_1SS_ALLOWED;
   3529
   3530	if (!lq_sta->bfer_capable)
   3531		goto out;
   3532
   3533	ieee80211_iterate_stations_atomic(mvm->hw,
   3534					  rs_bfer_active_iter,
   3535					  &data);
   3536	bfer_mvmsta = data.bfer_mvmsta;
   3537
    3538	/* This code is safe as it doesn't run concurrently for different
    3539	 * stations. This is guaranteed by the fact that calls to
    3540	 * ieee80211_tx_status never run concurrently for a single HW.
    3541	 */
   3542	if (!bfer_mvmsta) {
   3543		IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
   3544
   3545		ss_params |= LQ_SS_BFER_ALLOWED;
   3546		goto out;
   3547	}
   3548
   3549	IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
   3550		       bfer_mvmsta->sta_id);
   3551
    3552	/* If we have higher priority than the STA currently using BFER, take it over */
   3553	if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
   3554		struct iwl_lq_cmd *bfersta_lq_cmd =
   3555			&bfer_mvmsta->lq_sta.rs_drv.lq;
   3556		u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
   3557
   3558		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
   3559		bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
   3560		iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
   3561
   3562		ss_params |= LQ_SS_BFER_ALLOWED;
   3563		IWL_DEBUG_RATE(mvm,
   3564			       "Lower priority BFER sta found (%d). Switch BFER\n",
   3565			       bfer_mvmsta->sta_id);
   3566	}
   3567out:
   3568	lq_cmd->ss_params = cpu_to_le32(ss_params);
   3569}
   3570
   3571static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
   3572			   struct ieee80211_sta *sta,
   3573			   struct iwl_lq_sta *lq_sta,
   3574			   const struct rs_rate *initial_rate)
   3575{
   3576	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
   3577	struct iwl_mvm_sta *mvmsta;
   3578	struct iwl_mvm_vif *mvmvif;
   3579
   3580	lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
   3581	lq_cmd->agg_time_limit =
   3582		cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
   3583
   3584#ifdef CONFIG_MAC80211_DEBUGFS
   3585	if (lq_sta->pers.dbg_fixed_rate) {
   3586		rs_build_rates_table_from_fixed(mvm, lq_cmd,
   3587						lq_sta->band,
   3588						lq_sta->pers.dbg_fixed_rate);
   3589		return;
   3590	}
   3591#endif
   3592	if (WARN_ON_ONCE(!sta || !initial_rate))
   3593		return;
   3594
   3595	rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
   3596
   3597	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
   3598		rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
   3599
   3600	mvmsta = iwl_mvm_sta_from_mac80211(sta);
   3601	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
   3602
   3603	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
   3604	    num_of_ant(initial_rate->ant) == 1)
   3605		lq_cmd->single_stream_ant_msk = initial_rate->ant;
   3606
   3607	lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
   3608
   3609	/*
   3610	 * In case of low latency, tell the firmware to leave a frame in the
   3611	 * Tx Fifo so that it can start a transaction in the same TxOP. This
   3612	 * basically allows the firmware to send bursts.
   3613	 */
   3614	if (iwl_mvm_vif_low_latency(mvmvif))
   3615		lq_cmd->agg_frame_cnt_limit--;
   3616
   3617	if (mvmsta->vif->p2p)
   3618		lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK;
   3619
   3620	lq_cmd->agg_time_limit =
   3621			cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
   3622}
   3623
   3624static void *rs_alloc(struct ieee80211_hw *hw)
   3625{
   3626	return hw->priv;
   3627}
   3628
    3629/* the rate scaling framework requires a free function to be implemented */
   3630static void rs_free(void *mvm_rate)
   3631{
   3632	return;
   3633}
   3634
   3635static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
   3636{
   3637	struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
   3638	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
   3639
   3640	IWL_DEBUG_RATE(mvm, "enter\n");
   3641	IWL_DEBUG_RATE(mvm, "leave\n");
   3642}
   3643
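/* Render a v1 ucode rate as a human-readable string; the rate table comment
 * above rs_build_rates_table() shows output in a similar format. Returns
 * the number of characters written, like scnprintf().
 */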
   3644int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate)
   3645{
   3646
   3647	char *type;
   3648	u8 mcs = 0, nss = 0;
   3649	u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
   3650	u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) >>
   3651		RATE_MCS_CHAN_WIDTH_POS;
   3652
   3653	if (!(rate & RATE_MCS_HT_MSK_V1) &&
   3654	    !(rate & RATE_MCS_VHT_MSK_V1) &&
   3655	    !(rate & RATE_MCS_HE_MSK_V1)) {
   3656		int index = iwl_hwrate_to_plcp_idx(rate);
   3657
   3658		return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
   3659				 iwl_rs_pretty_ant(ant),
   3660				 index == IWL_RATE_INVALID ? "BAD" :
   3661				 iwl_rate_mcs(index)->mbps);
   3662	}
   3663
   3664	if (rate & RATE_MCS_VHT_MSK_V1) {
   3665		type = "VHT";
   3666		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
   3667		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
   3668		       >> RATE_VHT_MCS_NSS_POS) + 1;
   3669	} else if (rate & RATE_MCS_HT_MSK_V1) {
   3670		type = "HT";
   3671		mcs = rate & RATE_HT_MCS_INDEX_MSK_V1;
   3672		nss = ((rate & RATE_HT_MCS_NSS_MSK_V1)
   3673		       >> RATE_HT_MCS_NSS_POS_V1) + 1;
   3674	} else if (rate & RATE_MCS_HE_MSK_V1) {
   3675		type = "HE";
   3676		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
   3677		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
   3678		       >> RATE_VHT_MCS_NSS_POS) + 1;
   3679	} else {
   3680		type = "Unknown"; /* shouldn't happen */
   3681	}
   3682
   3683	return scnprintf(buf, bufsz,
   3684			 "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
   3685			 rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss,
   3686			 (rate & RATE_MCS_SGI_MSK_V1) ? "SGI " : "NGI ",
   3687			 (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
   3688			 (rate & RATE_MCS_LDPC_MSK_V1) ? "LDPC " : "",
   3689			 (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
   3690			 (rate & RATE_MCS_BF_MSK) ? "BF " : "");
   3691}
   3692
   3693#ifdef CONFIG_MAC80211_DEBUGFS
    3694/*
    3695 * Program the device to use a fixed rate for frame transmission.
    3696 * This is for debugging/testing only:
    3697 * once the device starts using a fixed rate, we need to reload the
    3698 * module to bring back normal operation.
    3699 */
   3700static void rs_program_fix_rate(struct iwl_mvm *mvm,
   3701				struct iwl_lq_sta *lq_sta)
   3702{
   3703	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
   3704	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
   3705	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
   3706
   3707	IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
   3708		       lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
   3709
   3710	if (lq_sta->pers.dbg_fixed_rate) {
   3711		rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
   3712		iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
   3713	}
   3714}
   3715
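/* Debugfs write handler for the "rate_scale_table" file created in
 * rs_drv_add_sta_debugfs(). Writing a hex ucode rate value fixes the rate;
 * a value that doesn't parse as hex clears dbg_fixed_rate. Illustrative
 * use (the exact debugfs path to the station directory varies):
 *
 *   echo 0x800D > rate_scale_table
 *
 * where 0x800D is the legacy 6 Mbps / antenna B entry from the table
 * example above, used here purely as an illustration.
 */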
   3716static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
   3717			const char __user *user_buf, size_t count, loff_t *ppos)
   3718{
   3719	struct iwl_lq_sta *lq_sta = file->private_data;
   3720	struct iwl_mvm *mvm;
   3721	char buf[64];
   3722	size_t buf_size;
   3723	u32 parsed_rate;
   3724
   3725	mvm = lq_sta->pers.drv;
   3726	memset(buf, 0, sizeof(buf));
   3727	buf_size = min(count, sizeof(buf) -  1);
   3728	if (copy_from_user(buf, user_buf, buf_size))
   3729		return -EFAULT;
   3730
   3731	if (sscanf(buf, "%x", &parsed_rate) == 1)
   3732		lq_sta->pers.dbg_fixed_rate = parsed_rate;
   3733	else
   3734		lq_sta->pers.dbg_fixed_rate = 0;
   3735
   3736	rs_program_fix_rate(mvm, lq_sta);
   3737
   3738	return count;
   3739}
   3740
   3741static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
   3742			char __user *user_buf, size_t count, loff_t *ppos)
   3743{
   3744	char *buff;
   3745	int desc = 0;
   3746	int i = 0;
   3747	ssize_t ret;
   3748	static const size_t bufsz = 2048;
   3749
   3750	struct iwl_lq_sta *lq_sta = file->private_data;
   3751	struct iwl_mvm_sta *mvmsta =
   3752		container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
   3753	struct iwl_mvm *mvm;
   3754	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
   3755	struct rs_rate *rate = &tbl->rate;
   3756	u32 ss_params;
   3757
   3758	mvm = lq_sta->pers.drv;
   3759	buff = kmalloc(bufsz, GFP_KERNEL);
   3760	if (!buff)
   3761		return -ENOMEM;
   3762
   3763	desc += scnprintf(buff + desc, bufsz - desc,
   3764			  "sta_id %d\n", lq_sta->lq.sta_id);
   3765	desc += scnprintf(buff + desc, bufsz - desc,
    3766			  "failed=%d success=%d rate=0x%lX\n",
   3767			  lq_sta->total_failed, lq_sta->total_success,
   3768			  lq_sta->active_legacy_rate);
   3769	desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
   3770			  lq_sta->pers.dbg_fixed_rate);
   3771	desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n",
   3772	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
   3773	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "");
   3774	desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
   3775			  (is_legacy(rate)) ? "legacy" :
   3776			  is_vht(rate) ? "VHT" : "HT");
   3777	if (!is_legacy(rate)) {
   3778		desc += scnprintf(buff + desc, bufsz - desc, " %s",
   3779		   (is_siso(rate)) ? "SISO" : "MIMO2");
   3780		desc += scnprintf(buff + desc, bufsz - desc, " %s",
   3781				(is_ht20(rate)) ? "20MHz" :
   3782				(is_ht40(rate)) ? "40MHz" :
   3783				(is_ht80(rate)) ? "80MHz" :
   3784				(is_ht160(rate)) ? "160MHz" : "BAD BW");
   3785		desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
   3786				(rate->sgi) ? "SGI" : "NGI",
   3787				(rate->ldpc) ? "LDPC" : "BCC",
   3788				(lq_sta->is_agg) ? "AGG on" : "",
   3789				(mvmsta->amsdu_enabled) ? "AMSDU on" : "");
   3790	}
   3791	desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
   3792			lq_sta->last_rate_n_flags);
   3793	desc += scnprintf(buff + desc, bufsz - desc,
   3794			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
   3795			lq_sta->lq.flags,
   3796			lq_sta->lq.mimo_delim,
   3797			lq_sta->lq.single_stream_ant_msk,
   3798			lq_sta->lq.dual_stream_ant_msk);
   3799
   3800	desc += scnprintf(buff + desc, bufsz - desc,
   3801			"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
   3802			le16_to_cpu(lq_sta->lq.agg_time_limit),
   3803			lq_sta->lq.agg_disable_start_th,
   3804			lq_sta->lq.agg_frame_cnt_limit);
   3805
   3806	desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
   3807			  lq_sta->lq.reduced_tpc);
   3808	ss_params = le32_to_cpu(lq_sta->lq.ss_params);
   3809	desc += scnprintf(buff + desc, bufsz - desc,
   3810			"single stream params: %s%s%s%s\n",
   3811			(ss_params & LQ_SS_PARAMS_VALID) ?
   3812			"VALID" : "INVALID",
   3813			(ss_params & LQ_SS_BFER_ALLOWED) ?
   3814			", BFER" : "",
   3815			(ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
   3816			", STBC" : "",
   3817			(ss_params & LQ_SS_FORCE) ?
   3818			", FORCE" : "");
   3819	desc += scnprintf(buff + desc, bufsz - desc,
   3820			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
   3821			lq_sta->lq.initial_rate_index[0],
   3822			lq_sta->lq.initial_rate_index[1],
   3823			lq_sta->lq.initial_rate_index[2],
   3824			lq_sta->lq.initial_rate_index[3]);
   3825
   3826	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
   3827		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
   3828
   3829		desc += scnprintf(buff + desc, bufsz - desc,
   3830				  " rate[%d] 0x%X ", i, r);
   3831		desc += rs_pretty_print_rate_v1(buff + desc, bufsz - desc, r);
   3832		if (desc < bufsz - 1)
   3833			buff[desc++] = '\n';
   3834	}
   3835
   3836	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
   3837	kfree(buff);
   3838	return ret;
   3839}
   3840
   3841static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
   3842	.write = rs_sta_dbgfs_scale_table_write,
   3843	.read = rs_sta_dbgfs_scale_table_read,
   3844	.open = simple_open,
   3845	.llseek = default_llseek,
   3846};
   3847static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
   3848			char __user *user_buf, size_t count, loff_t *ppos)
   3849{
   3850	char *buff;
   3851	int desc = 0;
   3852	int i, j;
   3853	ssize_t ret;
   3854	struct iwl_scale_tbl_info *tbl;
   3855	struct rs_rate *rate;
   3856	struct iwl_lq_sta *lq_sta = file->private_data;
   3857
   3858	buff = kmalloc(1024, GFP_KERNEL);
   3859	if (!buff)
   3860		return -ENOMEM;
   3861
   3862	for (i = 0; i < LQ_SIZE; i++) {
   3863		tbl = &(lq_sta->lq_info[i]);
   3864		rate = &tbl->rate;
   3865		desc += sprintf(buff+desc,
   3866				"%s type=%d SGI=%d BW=%s DUP=0\n"
   3867				"index=%d\n",
   3868				lq_sta->active_tbl == i ? "*" : "x",
   3869				rate->type,
   3870				rate->sgi,
   3871				is_ht20(rate) ? "20MHz" :
   3872				is_ht40(rate) ? "40MHz" :
   3873				is_ht80(rate) ? "80MHz" :
   3874				is_ht160(rate) ? "160MHz" : "ERR",
   3875				rate->index);
   3876		for (j = 0; j < IWL_RATE_COUNT; j++) {
   3877			desc += sprintf(buff+desc,
   3878				"counter=%d success=%d %%=%d\n",
   3879				tbl->win[j].counter,
   3880				tbl->win[j].success_counter,
   3881				tbl->win[j].success_ratio);
   3882		}
   3883	}
   3884	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
   3885	kfree(buff);
   3886	return ret;
   3887}
   3888
   3889static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
   3890	.read = rs_sta_dbgfs_stats_table_read,
   3891	.open = simple_open,
   3892	.llseek = default_llseek,
   3893};
   3894
   3895static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
   3896					      char __user *user_buf,
   3897					      size_t count, loff_t *ppos)
   3898{
   3899	static const char * const column_name[] = {
   3900		[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
   3901		[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
   3902		[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
   3903		[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
   3904		[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
   3905		[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
   3906		[RS_COLUMN_MIMO2] = "MIMO2",
   3907		[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
   3908	};
   3909
   3910	static const char * const rate_name[] = {
   3911		[IWL_RATE_1M_INDEX] = "1M",
   3912		[IWL_RATE_2M_INDEX] = "2M",
   3913		[IWL_RATE_5M_INDEX] = "5.5M",
   3914		[IWL_RATE_11M_INDEX] = "11M",
   3915		[IWL_RATE_6M_INDEX] = "6M|MCS0",
   3916		[IWL_RATE_9M_INDEX] = "9M",
   3917		[IWL_RATE_12M_INDEX] = "12M|MCS1",
   3918		[IWL_RATE_18M_INDEX] = "18M|MCS2",
   3919		[IWL_RATE_24M_INDEX] = "24M|MCS3",
   3920		[IWL_RATE_36M_INDEX] = "36M|MCS4",
   3921		[IWL_RATE_48M_INDEX] = "48M|MCS5",
   3922		[IWL_RATE_54M_INDEX] = "54M|MCS6",
   3923		[IWL_RATE_MCS_7_INDEX] = "MCS7",
   3924		[IWL_RATE_MCS_8_INDEX] = "MCS8",
   3925		[IWL_RATE_MCS_9_INDEX] = "MCS9",
   3926		[IWL_RATE_MCS_10_INDEX] = "MCS10",
   3927		[IWL_RATE_MCS_11_INDEX] = "MCS11",
   3928	};
   3929
   3930	char *buff, *pos, *endpos;
   3931	int col, rate;
   3932	ssize_t ret;
   3933	struct iwl_lq_sta *lq_sta = file->private_data;
   3934	struct rs_rate_stats *stats;
   3935	static const size_t bufsz = 1024;
   3936
   3937	buff = kmalloc(bufsz, GFP_KERNEL);
   3938	if (!buff)
   3939		return -ENOMEM;
   3940
   3941	pos = buff;
   3942	endpos = pos + bufsz;
   3943
   3944	pos += scnprintf(pos, endpos - pos, "COLUMN,");
   3945	for (rate = 0; rate < IWL_RATE_COUNT; rate++)
   3946		pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
   3947	pos += scnprintf(pos, endpos - pos, "\n");
   3948
   3949	for (col = 0; col < RS_COLUMN_COUNT; col++) {
   3950		pos += scnprintf(pos, endpos - pos,
   3951				 "%s,", column_name[col]);
   3952
   3953		for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
   3954			stats = &(lq_sta->pers.tx_stats[col][rate]);
   3955			pos += scnprintf(pos, endpos - pos,
   3956					 "%llu/%llu,",
   3957					 stats->success,
   3958					 stats->total);
   3959		}
   3960		pos += scnprintf(pos, endpos - pos, "\n");
   3961	}
   3962
   3963	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
   3964	kfree(buff);
   3965	return ret;
   3966}
   3967
   3968static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
   3969					       const char __user *user_buf,
   3970					       size_t count, loff_t *ppos)
   3971{
   3972	struct iwl_lq_sta *lq_sta = file->private_data;
   3973	memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats));
   3974
   3975	return count;
   3976}
   3977
   3978static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
   3979	.read = rs_sta_dbgfs_drv_tx_stats_read,
   3980	.write = rs_sta_dbgfs_drv_tx_stats_write,
   3981	.open = simple_open,
   3982	.llseek = default_llseek,
   3983};
   3984
   3985static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
   3986				       char __user *user_buf,
   3987				       size_t count, loff_t *ppos)
   3988{
   3989	struct iwl_lq_sta *lq_sta = file->private_data;
   3990	char buf[12];
   3991	int bufsz = sizeof(buf);
   3992	int pos = 0;
   3993	static const char * const ss_force_name[] = {
   3994		[RS_SS_FORCE_NONE] = "none",
   3995		[RS_SS_FORCE_STBC] = "stbc",
   3996		[RS_SS_FORCE_BFER] = "bfer",
   3997		[RS_SS_FORCE_SISO] = "siso",
   3998	};
   3999
   4000	pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
   4001			 ss_force_name[lq_sta->pers.ss_force]);
   4002	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
   4003}
   4004
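/* Debugfs write handler backing the "ss_force" file (see
 * MVM_DEBUGFS_ADD_FILE_RS(ss_force, ...) below). Accepted values are
 * none, siso, stbc and bfer, e.g. (debugfs path varies):
 *
 *   echo stbc > ss_force
 *
 * stbc and bfer are rejected with -EINVAL if the peer doesn't support them.
 */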
   4005static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
   4006					size_t count, loff_t *ppos)
   4007{
   4008	struct iwl_mvm *mvm = lq_sta->pers.drv;
   4009	int ret = 0;
   4010
   4011	if (!strncmp("none", buf, 4)) {
   4012		lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
   4013	} else if (!strncmp("siso", buf, 4)) {
   4014		lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
   4015	} else if (!strncmp("stbc", buf, 4)) {
   4016		if (lq_sta->stbc_capable) {
   4017			lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
   4018		} else {
   4019			IWL_ERR(mvm,
   4020				"can't force STBC. peer doesn't support\n");
   4021			ret = -EINVAL;
   4022		}
   4023	} else if (!strncmp("bfer", buf, 4)) {
   4024		if (lq_sta->bfer_capable) {
   4025			lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
   4026		} else {
   4027			IWL_ERR(mvm,
   4028				"can't force BFER. peer doesn't support\n");
   4029			ret = -EINVAL;
   4030		}
   4031	} else {
   4032		IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
   4033		ret = -EINVAL;
   4034	}
   4035	return ret ?: count;
   4036}
   4037
   4038#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
   4039	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
   4040#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do {		\
   4041		debugfs_create_file(#name, mode, parent, lq_sta,	\
   4042				    &iwl_dbgfs_##name##_ops);		\
   4043	} while (0)
   4044
   4045MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
   4046
   4047static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
   4048				   struct dentry *dir)
   4049{
   4050	struct iwl_lq_sta *lq_sta = priv_sta;
   4051	struct iwl_mvm_sta *mvmsta;
   4052
   4053	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
   4054
   4055	if (!mvmsta->vif)
   4056		return;
   4057
   4058	debugfs_create_file("rate_scale_table", 0600, dir,
   4059			    lq_sta, &rs_sta_dbgfs_scale_table_ops);
   4060	debugfs_create_file("rate_stats_table", 0400, dir,
   4061			    lq_sta, &rs_sta_dbgfs_stats_table_ops);
   4062	debugfs_create_file("drv_tx_stats", 0600, dir,
   4063			    lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
   4064	debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
   4065			  &lq_sta->tx_agg_tid_en);
   4066	debugfs_create_u8("reduced_tpc", 0600, dir,
   4067			  &lq_sta->pers.dbg_fixed_txp_reduction);
   4068
   4069	MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
   4070}
   4071#endif
   4072
   4073/*
   4074 * Initialization of rate scaling information is done by driver after
   4075 * the station is added. Since mac80211 calls this function before a
    4076 * station is added, we ignore it.
   4077 */
   4078static void rs_rate_init_ops(void *mvm_r,
   4079			     struct ieee80211_supported_band *sband,
   4080			     struct cfg80211_chan_def *chandef,
   4081			     struct ieee80211_sta *sta, void *mvm_sta)
   4082{
   4083}
   4084
   4085/* ops for rate scaling implemented in the driver */
   4086static const struct rate_control_ops rs_mvm_ops_drv = {
   4087	.name = RS_NAME,
   4088	.tx_status = rs_drv_mac80211_tx_status,
   4089	.get_rate = rs_drv_get_rate,
   4090	.rate_init = rs_rate_init_ops,
   4091	.alloc = rs_alloc,
   4092	.free = rs_free,
   4093	.alloc_sta = rs_drv_alloc_sta,
   4094	.free_sta = rs_free_sta,
   4095	.rate_update = rs_drv_rate_update,
   4096#ifdef CONFIG_MAC80211_DEBUGFS
   4097	.add_sta_debugfs = rs_drv_add_sta_debugfs,
   4098#endif
   4099	.capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW,
   4100};
   4101
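/* Entry point used by the rest of the driver: with TLC offload the firmware
 * owns rate scaling and initialization is delegated to rs_fw_rate_init();
 * otherwise the driver rate scaling implemented in this file is initialized
 * under the per-station pers.lock spinlock.
 */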
   4102void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
   4103			  enum nl80211_band band, bool update)
   4104{
   4105	if (iwl_mvm_has_tlc_offload(mvm)) {
   4106		rs_fw_rate_init(mvm, sta, band, update);
   4107	} else {
   4108		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
   4109
   4110		spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock);
   4111		rs_drv_rate_init(mvm, sta, band);
   4112		spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
   4113	}
   4114}
   4115
   4116int iwl_mvm_rate_control_register(void)
   4117{
   4118	return ieee80211_rate_control_register(&rs_mvm_ops_drv);
   4119}
   4120
   4121void iwl_mvm_rate_control_unregister(void)
   4122{
   4123	ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
   4124}
   4125
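/* mvmsta->tx_protection acts as a reference count: the RTS flag is set in
 * the LQ command on the 0 -> 1 transition and cleared again when the count
 * drops back to 0; iwl_mvm_send_lq_cmd() is called on every invocation to
 * push the current flags to the firmware.
 */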
   4126static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
   4127				bool enable)
   4128{
   4129	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
   4130
   4131	lockdep_assert_held(&mvm->mutex);
   4132
   4133	if (enable) {
   4134		if (mvmsta->tx_protection == 0)
   4135			lq->flags |= LQ_FLAG_USE_RTS_MSK;
   4136		mvmsta->tx_protection++;
   4137	} else {
   4138		mvmsta->tx_protection--;
   4139		if (mvmsta->tx_protection == 0)
   4140			lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
   4141	}
   4142
   4143	return iwl_mvm_send_lq_cmd(mvm, lq);
   4144}
   4145
   4146/**
   4147 * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection
   4148 * @mvm: The mvm component
   4149 * @mvmsta: The station
   4150 * @enable: Enable Tx protection?
   4151 */
   4152int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
   4153			  bool enable)
   4154{
   4155	if (iwl_mvm_has_tlc_offload(mvm))
   4156		return rs_fw_tx_protection(mvm, mvmsta, enable);
   4157	else
   4158		return rs_drv_tx_protection(mvm, mvmsta, enable);
   4159}