cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mt76.h (36581B)


/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
#define MT_QFLAG_WED		BIT(4)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
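
/*
 * Illustrative sketch (not part of upstream mt76): a queue flags value
 * built with MT_WED_Q_TX() can be decoded again with FIELD_GET(), e.g.:
 *
 *	u32 qflags = MT_WED_Q_TX(1);
 *	bool wed = qflags & MT_QFLAG_WED;                  (true)
 *	u32 type = FIELD_GET(MT_QFLAG_WED_TYPE, qflags);   (MT76_WED_Q_TX)
 *	u32 ring = FIELD_GET(MT_QFLAG_WED_RING, qflags);   (ring index 1)
 */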

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_EXT,
	MT_RXQ_EXT_WA,
	MT_RXQ_MAIN_WA,
	__MT_RXQ_MAX
};

enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len;
	bool skip_unmap;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	u32 dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u8 hw_idx;
	u8 qid;
	u8 flags;

	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};

struct mt76_mcu_ops {
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 544

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 sta:1;
	u8 ext_phy:1;
	u8 amsdu:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head list;
	struct idr pktid;
};

struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* This is the timeout for giving up on waiting for the TXS callback.
 * It starts at the time the DMA_DONE callback was seen: the packet is
 * known to have been processed by then, so the firmware should not take
 * long after that to send the TXS callback if it is going to send one
 * at all.
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	void (*update_survey)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
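
/* Illustrative note (assumption, not upstream documentation): e.g.
 * MT_VEND_ADDR(CFG, 0x14) expands to (MT_VEND_TYPE_CFG | 0x14), tagging
 * address 0x14 with BIT(30) so the USB register helpers can dispatch it
 * as a config-space access.
 */
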
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;

	struct work_struct stat_work;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif {
	u8 idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	struct net_device tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect the wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	struct mt76_rate_power rate_power;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	__MT_PHY_TYPE_HE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
	u64 tx_bw[4];		/* 20, 40, 80, 160 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}
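
/*
 * Minimal usage sketch (assumed, mirroring how mt76 drivers build their
 * legacy rate tables; the array below is hypothetical):
 *
 *	static struct ieee80211_rate example_rates[] = {
 *		CCK_RATE(0, 10),	(1 Mbps; bitrate is in 100 kbps units)
 *		CCK_RATE(1, 20),	(2 Mbps)
 *		OFDM_RATE(11, 60),	(6 Mbps)
 *	};
 *
 * The PHY type is encoded in the high byte of hw_value, the rate index
 * in the low byte.
 */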

extern struct ieee80211_rate mt76_rates[12];

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)


#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
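
/*
 * Example (illustrative; MT_EXAMPLE_REG and MT_EXAMPLE_FIELD are
 * hypothetical definitions, e.g. MT_EXAMPLE_FIELD = GENMASK(7, 4)):
 *
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD, 0x5);
 *
 * performs a single read-modify-write that clears the field mask and
 * writes FIELD_PREP(MT_EXAMPLE_FIELD, 0x5) into it.
 */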

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
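
/*
 * Usage sketch (illustrative, hypothetical register/field names): poll a
 * register until the masked bits reach the expected value or the timeout
 * expires, e.g.
 *
 *	if (!mt76_poll(dev, MT_EXAMPLE_REG, MT_EXAMPLE_BUSY, 0, 100))
 *		return -ETIMEDOUT;
 *
 * In upstream mt76, the timeout of the plain variant is interpreted in
 * microseconds and that of the _msec variant in milliseconds.
 */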

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)		(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
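
/*
 * Usage sketch (illustrative): the iterator visits only RX queues that
 * actually have descriptors allocated, e.g.
 *
 *	int i;
 *
 *	mt76_for_each_q_rx(dev, i)
 *		napi_disable(&dev->napi[i]);
 *
 * where dev is a struct mt76_dev pointer.
 */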

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = qid;
	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = __MT_TXQ_MAX + qid;
	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	if (skb == dev->phy.test.tx_skb)
		*hw = dev->phy.hw;
	else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
		*hw = dev->phy2->hw;
	else
		return false;
	return true;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}


/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, GFP_KERNEL);
}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
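
/*
 * Token lifecycle sketch (illustrative, not taken verbatim from this
 * tree): the TX path typically allocates a token for a txwi before
 * handing the frame to the hardware and stores it in the TX descriptor;
 * the completion path translates the token back into the txwi:
 *
 *	token = mt76_token_get(dev, &txwi);
 *	if (token >= 0)
 *		... program token into the TX descriptor ...
 *	...
 *	txwi = mt76_token_put(dev, token);	(on TX completion)
 */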

static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}

static inline void
mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);
}

#endif