cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

htt.h (75078B)


      1/* SPDX-License-Identifier: ISC */
      2/*
      3 * Copyright (c) 2005-2011 Atheros Communications Inc.
      4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
      5 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
      6 */
      7
      8#ifndef _HTT_H_
      9#define _HTT_H_
     10
     11#include <linux/bug.h>
     12#include <linux/interrupt.h>
     13#include <linux/dmapool.h>
     14#include <linux/hashtable.h>
     15#include <linux/kfifo.h>
     16#include <net/mac80211.h>
     17
     18#include "htc.h"
     19#include "hw.h"
     20#include "rx_desc.h"
     21
     22enum htt_dbg_stats_type {
     23	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
     24	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
     25	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
     26	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
     27	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
     28	/* bits 5-23 currently reserved */
     29
     30	HTT_DBG_NUM_STATS /* keep this last */
     31};
     32
     33enum htt_h2t_msg_type { /* host-to-target */
     34	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
     35	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
     36	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
     37	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
     38	HTT_H2T_MSG_TYPE_SYNC               = 4,
     39	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
     40	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
     41
     42	/* This command is used for sending management frames in HTT < 3.0.
     43	 * HTT >= 3.0 uses TX_FRM for everything.
     44	 */
     45	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
     46	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,
     47
     48	HTT_H2T_NUM_MSGS /* keep this last */
     49};
     50
     51struct htt_cmd_hdr {
     52	u8 msg_type;
     53} __packed;
     54
     55struct htt_ver_req {
     56	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
     57} __packed;
     58
     59/*
     60 * HTT tx MSDU descriptor
     61 *
     62 * The HTT tx MSDU descriptor is created by the host HTT SW for each
     63 * tx MSDU.  The HTT tx MSDU descriptor contains the information that
     64 * the target firmware needs for the FW's tx processing, particularly
     65 * for creating the HW msdu descriptor.
     66 * The same HTT tx descriptor is used for HL and LL systems, though
     67 * a few fields within the tx descriptor are used only by LL or
     68 * only by HL.
     69 * The HTT tx descriptor is defined in two manners: by a struct with
     70 * bitfields, and by a series of [dword offset, bit mask, bit shift]
     71 * definitions.
      72 * The target should use the struct def, for simplicity and clarity,
      73 * but the host shall use the bit-mask + bit-shift defs, to be endian-
     74 * neutral.  Specifically, the host shall use the get/set macros built
     75 * around the mask + shift defs.
     76 */
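
/*
 * Illustrative example (not upstream driver code): the endian-neutral
 * get/set pattern the comment above refers to. The helper names are
 * invented for this sketch; the real host code uses its own mask/shift
 * macros built from the _MASK/_LSB definitions in this file.
 */
static inline u32 htt_example_field_get(__le32 dword, u32 mask, u32 lsb)
{
	/* convert from little-endian first, then mask and shift down */
	return (__le32_to_cpu(dword) & mask) >> lsb;
}

static inline __le32 htt_example_field_set(__le32 dword, u32 mask, u32 lsb,
					   u32 value)
{
	u32 tmp = __le32_to_cpu(dword);

	/* clear the field, then or in the shifted value */
	tmp = (tmp & ~mask) | ((value << lsb) & mask);
	return __cpu_to_le32(tmp);
}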
     77struct htt_data_tx_desc_frag {
     78	union {
     79		struct double_word_addr {
     80			__le32 paddr;
     81			__le32 len;
     82		} __packed dword_addr;
     83		struct triple_word_addr {
     84			__le32 paddr_lo;
     85			__le16 paddr_hi;
     86			__le16 len_16;
     87		} __packed tword_addr;
     88	} __packed;
     89} __packed;
     90
     91struct htt_msdu_ext_desc {
     92	__le32 tso_flag[3];
     93	__le16 ip_identification;
     94	u8 flags;
     95	u8 reserved;
     96	struct htt_data_tx_desc_frag frags[6];
     97};
     98
     99struct htt_msdu_ext_desc_64 {
    100	__le32 tso_flag[5];
    101	__le16 ip_identification;
    102	u8 flags;
    103	u8 reserved;
    104	struct htt_data_tx_desc_frag frags[6];
    105};
    106
    107#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
    108#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
    109#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
    110#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
    111#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)
    112
    113#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
    114				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
    115				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
    116				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
    117				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
    118
    119#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64		BIT(16)
    120#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64		BIT(17)
    121#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64		BIT(18)
    122#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64		BIT(19)
    123#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64		BIT(20)
    124#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64		BIT(21)
    125
    126#define HTT_MSDU_CHECKSUM_ENABLE_64  (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
    127				     | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
    128				     | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
    129				     | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
    130				     | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
    131
    132enum htt_data_tx_desc_flags0 {
    133	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
    134	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
    135	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
    136	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
    137	HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
    138#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
    139#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
    140};
    141
    142enum htt_data_tx_desc_flags1 {
    143#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
    144#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
    145#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
    146#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
    147#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
    148#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
    149	HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
    150	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
    151	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
    152	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
    153	HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE      = 1 << 15
    154};
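
/*
 * Illustrative example (not upstream driver code): assembling the flags1
 * word of a data tx descriptor from the mask/LSB definitions above. The
 * vdev_id and ext_tid arguments are arbitrary example inputs.
 */
static inline __le16 htt_example_data_tx_flags1(u8 vdev_id, u8 ext_tid)
{
	u16 flags1 = 0;

	flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
	flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	return __cpu_to_le16(flags1);
}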
    155
    156#define HTT_TX_CREDIT_DELTA_ABS_M      0xffff0000
    157#define HTT_TX_CREDIT_DELTA_ABS_S      16
    158#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
    159	    (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
    160
    161#define HTT_TX_CREDIT_SIGN_BIT_M       0x00000100
    162#define HTT_TX_CREDIT_SIGN_BIT_S       8
    163#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
    164	    (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
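
/*
 * Illustrative example (not upstream driver code): decoding a credit
 * update word with the two macros above. The sign bit selects whether the
 * absolute delta is added to or subtracted from the host's credit count.
 */
static inline int htt_example_tx_credit_delta(u32 word)
{
	int delta = HTT_TX_CREDIT_DELTA_ABS_GET(word);

	if (HTT_TX_CREDIT_SIGN_BIT_GET(word))
		delta = -delta;

	return delta;
}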
    165
    166enum htt_data_tx_ext_tid {
    167	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
    168	HTT_DATA_TX_EXT_TID_MGMT                = 17,
    169	HTT_DATA_TX_EXT_TID_INVALID             = 31
    170};
    171
    172#define HTT_INVALID_PEERID 0xFFFF
    173
    174/*
    175 * htt_data_tx_desc - used for data tx path
    176 *
    177 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
    178 *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
    179 *                for special kinds of tids
    180 *       postponed: only for HL hosts. indicates if this is a resend
     181 *                  (HL hosts manage queues on the host)
    182 *       more_in_batch: only for HL hosts. indicates if more packets are
    183 *                      pending. this allows target to wait and aggregate
    184 *       freq: 0 means home channel of given vdev. intended for offchannel
    185 */
    186struct htt_data_tx_desc {
    187	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
    188	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
    189	__le16 len;
    190	__le16 id;
    191	__le32 frags_paddr;
    192	union {
    193		__le32 peerid;
    194		struct {
    195			__le16 peerid;
    196			__le16 freq;
    197		} __packed offchan_tx;
    198	} __packed;
    199	u8 prefetch[0]; /* start of frame, for FW classification engine */
    200} __packed;
    201
    202struct htt_data_tx_desc_64 {
    203	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
    204	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
    205	__le16 len;
    206	__le16 id;
    207	__le64 frags_paddr;
    208	union {
    209		__le32 peerid;
    210		struct {
    211			__le16 peerid;
    212			__le16 freq;
    213		} __packed offchan_tx;
    214	} __packed;
    215	u8 prefetch[0]; /* start of frame, for FW classification engine */
    216} __packed;
    217
    218enum htt_rx_ring_flags {
    219	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
    220	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
    221	HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
    222	HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
    223	HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
    224	HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
    225	HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
    226	HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
    227	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
    228	HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
    229	HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
    230	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
    231	HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
    232	HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
    233	HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
    234	HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
    235};
    236
    237#define HTT_RX_RING_SIZE_MIN 128
    238#define HTT_RX_RING_SIZE_MAX 2048
    239#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
    240#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
    241#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
    242
    243struct htt_rx_ring_rx_desc_offsets {
    244	/* the following offsets are in 4-byte units */
    245	__le16 mac80211_hdr_offset;
    246	__le16 msdu_payload_offset;
    247	__le16 ppdu_start_offset;
    248	__le16 ppdu_end_offset;
    249	__le16 mpdu_start_offset;
    250	__le16 mpdu_end_offset;
    251	__le16 msdu_start_offset;
    252	__le16 msdu_end_offset;
    253	__le16 rx_attention_offset;
    254	__le16 frag_info_offset;
    255} __packed;
    256
    257struct htt_rx_ring_setup_ring32 {
    258	__le32 fw_idx_shadow_reg_paddr;
    259	__le32 rx_ring_base_paddr;
    260	__le16 rx_ring_len; /* in 4-byte words */
    261	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
    262	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
    263	__le16 fw_idx_init_val;
    264
    265	struct htt_rx_ring_rx_desc_offsets offsets;
    266} __packed;
    267
    268struct htt_rx_ring_setup_ring64 {
    269	__le64 fw_idx_shadow_reg_paddr;
    270	__le64 rx_ring_base_paddr;
    271	__le16 rx_ring_len; /* in 4-byte words */
    272	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
    273	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
    274	__le16 fw_idx_init_val;
    275
    276	struct htt_rx_ring_rx_desc_offsets offsets;
    277} __packed;
    278
    279struct htt_rx_ring_setup_hdr {
    280	u8 num_rings; /* supported values: 1, 2 */
    281	__le16 rsvd0;
    282} __packed;
    283
    284struct htt_rx_ring_setup_32 {
    285	struct htt_rx_ring_setup_hdr hdr;
    286	struct htt_rx_ring_setup_ring32 rings[];
    287} __packed;
    288
    289struct htt_rx_ring_setup_64 {
    290	struct htt_rx_ring_setup_hdr hdr;
    291	struct htt_rx_ring_setup_ring64 rings[];
    292} __packed;
    293
    294/*
    295 * htt_stats_req - request target to send specified statistics
    296 *
    297 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
     298 * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
     299 *	so make sure it's little-endian.
     300 * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
     301 *	so make sure it's little-endian.
    302 * @cfg_val: stat_type specific configuration
    303 * @stat_type: see %htt_dbg_stats_type
    304 * @cookie_lsb: used for confirmation message from target->host
    305 * @cookie_msb: ditto as %cookie
    306 */
    307struct htt_stats_req {
    308	u8 upload_types[3];
    309	u8 rsvd0;
    310	u8 reset_types[3];
    311	struct {
    312		u8 mpdu_bytes;
    313		u8 mpdu_num_msdus;
    314		u8 msdu_bytes;
    315	} __packed;
    316	u8 stat_type;
    317	__le32 cookie_lsb;
    318	__le32 cookie_msb;
    319} __packed;
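
/*
 * Illustrative example (not upstream driver code): filling the 24-bit
 * upload_types/reset_types fields of struct htt_stats_req from
 * %htt_dbg_stats_type bitmasks, least significant byte first as the
 * comment above requires.
 */
static inline void htt_example_stats_req_fill(struct htt_stats_req *req,
					      u32 upload_mask, u32 reset_mask)
{
	req->upload_types[0] = upload_mask & 0xff;
	req->upload_types[1] = (upload_mask >> 8) & 0xff;
	req->upload_types[2] = (upload_mask >> 16) & 0xff;
	req->reset_types[0] = reset_mask & 0xff;
	req->reset_types[1] = (reset_mask >> 8) & 0xff;
	req->reset_types[2] = (reset_mask >> 16) & 0xff;
}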
    320
    321#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
    322#define HTT_STATS_BIT_MASK GENMASK(16, 0)
    323
    324/*
    325 * htt_oob_sync_req - request out-of-band sync
    326 *
    327 * The HTT SYNC tells the target to suspend processing of subsequent
    328 * HTT host-to-target messages until some other target agent locally
    329 * informs the target HTT FW that the current sync counter is equal to
    330 * or greater than (in a modulo sense) the sync counter specified in
    331 * the SYNC message.
    332 *
    333 * This allows other host-target components to synchronize their operation
    334 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
    335 * security key has been downloaded to and activated by the target.
    336 * In the absence of any explicit synchronization counter value
    337 * specification, the target HTT FW will use zero as the default current
    338 * sync value.
    339 *
    340 * The HTT target FW will suspend its host->target message processing as long
    341 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
    342 */
    343struct htt_oob_sync_req {
    344	u8 sync_count;
    345	__le16 rsvd0;
    346} __packed;
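
/*
 * Illustrative example (not upstream driver code): the modulo-256 window
 * rule described above. The target keeps host->target processing
 * suspended while the in-band counter is ahead of the out-of-band counter
 * by 1..127.
 */
static inline bool htt_example_oob_sync_suspended(u8 inband_count,
						  u8 oob_count)
{
	u8 diff = (inband_count - oob_count) & 0xff;

	return diff > 0 && diff < 128;
}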
    347
    348struct htt_aggr_conf {
    349	u8 max_num_ampdu_subframes;
    350	/* amsdu_subframes is limited by 0x1F mask */
    351	u8 max_num_amsdu_subframes;
    352} __packed;
    353
    354struct htt_aggr_conf_v2 {
    355	u8 max_num_ampdu_subframes;
    356	/* amsdu_subframes is limited by 0x1F mask */
    357	u8 max_num_amsdu_subframes;
    358	u8 reserved;
    359} __packed;
    360
    361#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
    362struct htt_mgmt_tx_desc_qca99x0 {
    363	__le32 rate;
    364} __packed;
    365
    366struct htt_mgmt_tx_desc {
    367	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
    368	__le32 msdu_paddr;
    369	__le32 desc_id;
    370	__le32 len;
    371	__le32 vdev_id;
    372	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
    373	union {
    374		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
    375	} __packed;
    376} __packed;
    377
    378enum htt_mgmt_tx_status {
    379	HTT_MGMT_TX_STATUS_OK    = 0,
    380	HTT_MGMT_TX_STATUS_RETRY = 1,
    381	HTT_MGMT_TX_STATUS_DROP  = 2
    382};
    383
    384/*=== target -> host messages ===============================================*/
    385
    386enum htt_main_t2h_msg_type {
    387	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
    388	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
    389	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
    390	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
    391	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
    392	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
    393	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
    394	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
    395	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
    396	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
    397	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
    398	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
    399	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
    400	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
    401	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
    402	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
    403	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
    404	HTT_MAIN_T2H_MSG_TYPE_TEST,
    405	/* keep this last */
    406	HTT_MAIN_T2H_NUM_MSGS
    407};
    408
    409enum htt_10x_t2h_msg_type {
    410	HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
    411	HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
    412	HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
    413	HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
    414	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
    415	HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
    416	HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
    417	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
    418	HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
    419	HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
    420	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
    421	HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
    422	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
    423	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
    424	HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
    425	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
    426	HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
    427	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
    428	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
    429	/* keep this last */
    430	HTT_10X_T2H_NUM_MSGS
    431};
    432
    433enum htt_tlv_t2h_msg_type {
    434	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
    435	HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
    436	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
    437	HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
    438	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
    439	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
    440	HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
    441	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
    442	HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
    443	HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
    444	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
    445	HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
    446	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
    447	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
    448	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
    449	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
    450	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
    451	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
    452	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
     453	/* 0x13 reserved */
    454	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
    455	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
    456	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
    457	HTT_TLV_T2H_MSG_TYPE_TEST,
    458	/* keep this last */
    459	HTT_TLV_T2H_NUM_MSGS
    460};
    461
    462enum htt_10_4_t2h_msg_type {
    463	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
    464	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
    465	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
    466	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
    467	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
    468	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
    469	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
    470	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
    471	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
    472	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
    473	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
    474	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
    475	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
    476	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
    477	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
    478	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
    479	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
    480	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
    481	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
    482	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
    483	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
    484	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
    485	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
    486	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
    487	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
    488	/* 0x19 to 0x2f are reserved */
    489	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
    490	HTT_10_4_T2H_MSG_TYPE_PEER_STATS	     = 0x31,
    491	/* keep this last */
    492	HTT_10_4_T2H_NUM_MSGS
    493};
    494
    495enum htt_t2h_msg_type {
    496	HTT_T2H_MSG_TYPE_VERSION_CONF,
    497	HTT_T2H_MSG_TYPE_RX_IND,
    498	HTT_T2H_MSG_TYPE_RX_FLUSH,
    499	HTT_T2H_MSG_TYPE_PEER_MAP,
    500	HTT_T2H_MSG_TYPE_PEER_UNMAP,
    501	HTT_T2H_MSG_TYPE_RX_ADDBA,
    502	HTT_T2H_MSG_TYPE_RX_DELBA,
    503	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
    504	HTT_T2H_MSG_TYPE_PKTLOG,
    505	HTT_T2H_MSG_TYPE_STATS_CONF,
    506	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
    507	HTT_T2H_MSG_TYPE_SEC_IND,
    508	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
    509	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
    510	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
    511	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
    512	HTT_T2H_MSG_TYPE_RX_PN_IND,
    513	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
    514	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
    515	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
    516	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
    517	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
    518	HTT_T2H_MSG_TYPE_AGGR_CONF,
    519	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
    520	HTT_T2H_MSG_TYPE_TEST,
    521	HTT_T2H_MSG_TYPE_EN_STATS,
    522	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
    523	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
    524	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
    525	HTT_T2H_MSG_TYPE_PEER_STATS,
    526	/* keep this last */
    527	HTT_T2H_NUM_MSGS
    528};
    529
    530/*
    531 * htt_resp_hdr - header for target-to-host messages
    532 *
    533 * msg_type: see htt_t2h_msg_type
    534 */
    535struct htt_resp_hdr {
    536	u8 msg_type;
    537} __packed;
    538
    539#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
    540#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
    541#define HTT_RESP_HDR_MSG_TYPE_LSB    0
    542
    543/* htt_ver_resp - response sent for htt_ver_req */
    544struct htt_ver_resp {
    545	u8 minor;
    546	u8 major;
    547	u8 rsvd0;
    548} __packed;
    549
    550#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
    551
    552#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK	GENMASK(7, 0)
    553
    554struct htt_mgmt_tx_completion {
    555	u8 rsvd0;
    556	u8 rsvd1;
    557	u8 flags;
    558	__le32 desc_id;
    559	__le32 status;
    560	__le32 ppdu_id;
    561	__le32 info;
    562} __packed;
    563
    564#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
    565#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
    566#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
    567#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
    568#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)
    569
    570#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
    571#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
    572#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
    573#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
    574#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
    575#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
    576#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
    577#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
    578#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
    579#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
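
/*
 * Illustrative example (not upstream driver code): extracting the flush
 * window and MPDU range count from the info1 word of an rx indication
 * header using the masks above.
 */
static inline void htt_example_rx_ind_info1(__le32 info1_le,
					    u8 *num_mpdu_ranges,
					    u8 *flush_start, u8 *flush_end)
{
	u32 info1 = __le32_to_cpu(info1_le);

	*num_mpdu_ranges = (info1 & HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK) >>
			   HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB;
	*flush_start = (info1 & HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK) >>
		       HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB;
	*flush_end = (info1 & HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK) >>
		     HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB;
}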
    580
    581#define HTT_TX_CMPL_FLAG_DATA_RSSI		BIT(0)
    582#define HTT_TX_CMPL_FLAG_PPID_PRESENT		BIT(1)
    583#define HTT_TX_CMPL_FLAG_PA_PRESENT		BIT(2)
    584#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT	BIT(3)
    585
    586#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
    587#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
    588#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
    589
    590struct htt_rx_indication_hdr {
    591	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
    592	__le16 peer_id;
    593	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
    594} __packed;
    595
    596#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
    597#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
    598#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
    599#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
    600#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
    601#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)
    602
    603#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
    604#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
    605#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
    606#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24
    607
    608#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
    609#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
    610#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
    611#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
    612
    613enum htt_rx_legacy_rate {
    614	HTT_RX_OFDM_48 = 0,
    615	HTT_RX_OFDM_24 = 1,
    616	HTT_RX_OFDM_12,
    617	HTT_RX_OFDM_6,
    618	HTT_RX_OFDM_54,
    619	HTT_RX_OFDM_36,
    620	HTT_RX_OFDM_18,
    621	HTT_RX_OFDM_9,
    622
    623	/* long preamble */
    624	HTT_RX_CCK_11_LP = 0,
    625	HTT_RX_CCK_5_5_LP = 1,
    626	HTT_RX_CCK_2_LP,
    627	HTT_RX_CCK_1_LP,
    628	/* short preamble */
    629	HTT_RX_CCK_11_SP,
    630	HTT_RX_CCK_5_5_SP,
    631	HTT_RX_CCK_2_SP
    632};
    633
    634enum htt_rx_legacy_rate_type {
    635	HTT_RX_LEGACY_RATE_OFDM = 0,
    636	HTT_RX_LEGACY_RATE_CCK
    637};
    638
    639enum htt_rx_preamble_type {
    640	HTT_RX_LEGACY        = 0x4,
    641	HTT_RX_HT            = 0x8,
    642	HTT_RX_HT_WITH_TXBF  = 0x9,
    643	HTT_RX_VHT           = 0xC,
    644	HTT_RX_VHT_WITH_TXBF = 0xD,
    645};
    646
    647/*
    648 * Fields: phy_err_valid, phy_err_code, tsf,
    649 * usec_timestamp, sub_usec_timestamp
    650 * ..are valid only if end_valid == 1.
    651 *
    652 * Fields: rssi_chains, legacy_rate_type,
    653 * legacy_rate_cck, preamble_type, service,
    654 * vht_sig_*
    655 * ..are valid only if start_valid == 1;
    656 */
    657struct htt_rx_indication_ppdu {
    658	u8 combined_rssi;
    659	u8 sub_usec_timestamp;
    660	u8 phy_err_code;
    661	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
    662	struct {
    663		u8 pri20_db;
    664		u8 ext20_db;
    665		u8 ext40_db;
    666		u8 ext80_db;
    667	} __packed rssi_chains[4];
    668	__le32 tsf;
    669	__le32 usec_timestamp;
    670	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
    671	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
    672} __packed;
    673
    674enum htt_rx_mpdu_status {
    675	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
    676	HTT_RX_IND_MPDU_STATUS_OK,
    677	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
    678	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
    679	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
    680	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
    681	/* only accept EAPOL frames */
    682	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
    683	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
    684	/* Non-data in promiscuous mode */
    685	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
    686	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
    687	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
    688	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
    689	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
    690	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
    691
    692	/*
    693	 * MISC: discard for unspecified reasons.
    694	 * Leave this enum value last.
    695	 */
    696	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
    697};
    698
    699struct htt_rx_indication_mpdu_range {
    700	u8 mpdu_count;
    701	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
    702	u8 pad0;
    703	u8 pad1;
    704} __packed;
    705
    706struct htt_rx_indication_prefix {
    707	__le16 fw_rx_desc_bytes;
    708	u8 pad0;
    709	u8 pad1;
    710};
    711
    712struct htt_rx_indication {
    713	struct htt_rx_indication_hdr hdr;
    714	struct htt_rx_indication_ppdu ppdu;
    715	struct htt_rx_indication_prefix prefix;
    716
    717	/*
    718	 * the following fields are both dynamically sized, so
    719	 * take care addressing them
    720	 */
    721
    722	/* the size of this is %fw_rx_desc_bytes */
    723	struct fw_rx_desc_base fw_desc;
    724
    725	/*
    726	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
    727	 * and has %num_mpdu_ranges elements.
    728	 */
    729	struct htt_rx_indication_mpdu_range mpdu_ranges[];
    730} __packed;
    731
    732/* High latency version of the RX indication */
    733struct htt_rx_indication_hl {
    734	struct htt_rx_indication_hdr hdr;
    735	struct htt_rx_indication_ppdu ppdu;
    736	struct htt_rx_indication_prefix prefix;
    737	struct fw_rx_desc_hl fw_desc;
    738	struct htt_rx_indication_mpdu_range mpdu_ranges[];
    739} __packed;
    740
    741struct htt_hl_rx_desc {
    742	__le32 info;
    743	__le32 pn_31_0;
    744	union {
    745		struct {
    746			__le16 pn_47_32;
    747			__le16 pn_63_48;
    748		} pn16;
    749		__le32 pn_63_32;
    750	} u0;
    751	__le32 pn_95_64;
    752	__le32 pn_127_96;
    753} __packed;
    754
    755static inline struct htt_rx_indication_mpdu_range *
    756		htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
    757{
    758	void *ptr = rx_ind;
    759
    760	ptr += sizeof(rx_ind->hdr)
    761	     + sizeof(rx_ind->ppdu)
    762	     + sizeof(rx_ind->prefix)
    763	     + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
    764	return ptr;
    765}
    766
    767static inline struct htt_rx_indication_mpdu_range *
    768	htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
    769{
    770	void *ptr = rx_ind;
    771
    772	ptr += sizeof(rx_ind->hdr)
    773	     + sizeof(rx_ind->ppdu)
    774	     + sizeof(rx_ind->prefix)
    775	     + sizeof(rx_ind->fw_desc);
    776	return ptr;
    777}
    778
    779enum htt_rx_flush_mpdu_status {
    780	HTT_RX_FLUSH_MPDU_DISCARD = 0,
    781	HTT_RX_FLUSH_MPDU_REORDER = 1,
    782};
    783
    784/*
    785 * htt_rx_flush - discard or reorder given range of mpdus
    786 *
    787 * Note: host must check if all sequence numbers between
    788 *	[seq_num_start, seq_num_end-1] are valid.
    789 */
    790struct htt_rx_flush {
    791	__le16 peer_id;
    792	u8 tid;
    793	u8 rsvd0;
    794	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
    795	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
    796	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
    797};
    798
    799struct htt_rx_peer_map {
    800	u8 vdev_id;
    801	__le16 peer_id;
    802	u8 addr[6];
    803	u8 rsvd0;
    804	u8 rsvd1;
    805} __packed;
    806
    807struct htt_rx_peer_unmap {
    808	u8 rsvd0;
    809	__le16 peer_id;
    810} __packed;
    811
    812enum htt_txrx_sec_cast_type {
    813	HTT_TXRX_SEC_MCAST = 0,
    814	HTT_TXRX_SEC_UCAST
    815};
    816
    817enum htt_rx_pn_check_type {
    818	HTT_RX_NON_PN_CHECK = 0,
    819	HTT_RX_PN_CHECK
    820};
    821
    822enum htt_rx_tkip_demic_type {
    823	HTT_RX_NON_TKIP_MIC = 0,
    824	HTT_RX_TKIP_MIC
    825};
    826
    827enum htt_security_types {
    828	HTT_SECURITY_NONE,
    829	HTT_SECURITY_WEP128,
    830	HTT_SECURITY_WEP104,
    831	HTT_SECURITY_WEP40,
    832	HTT_SECURITY_TKIP,
    833	HTT_SECURITY_TKIP_NOMIC,
    834	HTT_SECURITY_AES_CCMP,
    835	HTT_SECURITY_WAPI,
    836
    837	HTT_NUM_SECURITY_TYPES /* keep this last! */
    838};
    839
    840#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
    841#define ATH10K_TXRX_NUM_EXT_TIDS 19
    842#define ATH10K_TXRX_NON_QOS_TID 16
    843
    844enum htt_security_flags {
    845#define HTT_SECURITY_TYPE_MASK 0x7F
    846#define HTT_SECURITY_TYPE_LSB  0
    847	HTT_SECURITY_IS_UNICAST = 1 << 7
    848};
    849
    850struct htt_security_indication {
    851	union {
     852		/* don't use bitfields; undefined behaviour */
    853		u8 flags; /* %htt_security_flags */
    854		struct {
    855			u8 security_type:7, /* %htt_security_types */
    856			   is_unicast:1;
    857		} __packed;
    858	} __packed;
    859	__le16 peer_id;
    860	u8 michael_key[8];
    861	u8 wapi_rsc[16];
    862} __packed;
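
/*
 * Illustrative example (not upstream driver code): decoding the flags byte
 * of a security indication without bitfields, as the comment in the union
 * above suggests.
 */
static inline enum htt_security_types
htt_example_sec_ind_type(const struct htt_security_indication *sec)
{
	return (sec->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
}

static inline bool
htt_example_sec_ind_is_unicast(const struct htt_security_indication *sec)
{
	return !!(sec->flags & HTT_SECURITY_IS_UNICAST);
}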
    863
    864#define HTT_RX_BA_INFO0_TID_MASK     0x000F
    865#define HTT_RX_BA_INFO0_TID_LSB      0
    866#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
    867#define HTT_RX_BA_INFO0_PEER_ID_LSB  4
    868
    869struct htt_rx_addba {
    870	u8 window_size;
    871	__le16 info0; /* %HTT_RX_BA_INFO0_ */
    872} __packed;
    873
    874struct htt_rx_delba {
    875	u8 rsvd0;
    876	__le16 info0; /* %HTT_RX_BA_INFO0_ */
    877} __packed;
    878
    879enum htt_data_tx_status {
    880	HTT_DATA_TX_STATUS_OK            = 0,
    881	HTT_DATA_TX_STATUS_DISCARD       = 1,
    882	HTT_DATA_TX_STATUS_NO_ACK        = 2,
    883	HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
    884	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
    885};
    886
    887enum htt_data_tx_flags {
    888#define HTT_DATA_TX_STATUS_MASK 0x07
    889#define HTT_DATA_TX_STATUS_LSB  0
    890#define HTT_DATA_TX_TID_MASK    0x78
    891#define HTT_DATA_TX_TID_LSB     3
    892	HTT_DATA_TX_TID_INVALID = 1 << 7
    893};
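
/*
 * Illustrative example (not upstream driver code): decoding the
 * per-completion flags byte with the mask/LSB values above. The tid is
 * only meaningful when the HTT_DATA_TX_TID_INVALID bit is clear.
 */
static inline u8 htt_example_data_tx_status(u8 flags)
{
	return (flags & HTT_DATA_TX_STATUS_MASK) >> HTT_DATA_TX_STATUS_LSB;
}

static inline u8 htt_example_data_tx_tid(u8 flags)
{
	return (flags & HTT_DATA_TX_TID_MASK) >> HTT_DATA_TX_TID_LSB;
}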
    894
    895#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
    896
    897struct htt_append_retries {
    898	__le16 msdu_id;
    899	u8 tx_retries;
    900	u8 flag;
    901} __packed;
    902
    903struct htt_data_tx_completion_ext {
    904	struct htt_append_retries a_retries;
    905	__le32 t_stamp;
    906	__le16 msdus_rssi[];
    907} __packed;
    908
    909/**
    910 * @brief target -> host TX completion indication message definition
    911 *
    912 * @details
    913 * The following diagram shows the format of the TX completion indication sent
    914 * from the target to the host
    915 *
    916 *          |31 28|27|26|25|24|23        16| 15 |14 11|10   8|7          0|
    917 *          |-------------------------------------------------------------|
    918 * header:  |rsvd |A2|TP|A1|A0|     num    | t_i| tid |status|  msg_type  |
    919 *          |-------------------------------------------------------------|
    920 * payload: |            MSDU1 ID          |         MSDU0 ID             |
    921 *          |-------------------------------------------------------------|
    922 *          :            MSDU3 ID          :         MSDU2 ID             :
    923 *          |-------------------------------------------------------------|
    924 *          |          struct htt_tx_compl_ind_append_retries             |
    925 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
    926 *          |          struct htt_tx_compl_ind_append_tx_tstamp           |
    927 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
    928 *          |           MSDU1 ACK RSSI     |        MSDU0 ACK RSSI        |
    929 *          |-------------------------------------------------------------|
    930 *          :           MSDU3 ACK RSSI     :        MSDU2 ACK RSSI        :
    931 *          |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
    932 *    -msg_type
    933 *     Bits 7:0
    934 *     Purpose: identifies this as HTT TX completion indication
    935 *    -status
    936 *     Bits 10:8
     937 *     Purpose: the TX completion status of the payload fragmentation descriptors
    938 *     Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
    939 *    -tid
    940 *     Bits 14:11
    941 *     Purpose: the tid associated with those fragmentation descriptors. It is
    942 *     valid or not, depending on the tid_invalid bit.
    943 *     Value: 0 to 15
    944 *    -tid_invalid
    945 *     Bits 15:15
    946 *     Purpose: this bit indicates whether the tid field is valid or not
    947 *     Value: 0 indicates valid, 1 indicates invalid
    948 *    -num
    949 *     Bits 23:16
     950 *     Purpose: the number of payloads in this indication
    951 *     Value: 1 to 255
    952 *    -A0 = append
    953 *     Bits 24:24
    954 *     Purpose: append the struct htt_tx_compl_ind_append_retries which contains
    955 *            the number of tx retries for one MSDU at the end of this message
    956 *     Value: 0 indicates no appending, 1 indicates appending
    957 *    -A1 = append1
    958 *     Bits 25:25
    959 *     Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
    960 *            contains the timestamp info for each TX msdu id in payload.
    961 *     Value: 0 indicates no appending, 1 indicates appending
    962 *    -TP = MSDU tx power presence
    963 *     Bits 26:26
    964 *     Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
    965 *            for each MSDU referenced by the TX_COMPL_IND message.
    966 *            The order of the per-MSDU tx power reports matches the order
    967 *            of the MSDU IDs.
    968 *     Value: 0 indicates not appending, 1 indicates appending
    969 *    -A2 = append2
    970 *     Bits 27:27
    971 *     Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
    972 *            TX_COMP_IND message.  The order of the per-MSDU ACK RSSI report
    973 *            matches the order of the MSDU IDs.
    974 *            The ACK RSSI values are valid when status is COMPLETE_OK (and
    975 *            this append2 bit is set).
    976 *     Value: 0 indicates not appending, 1 indicates appending
    977 */
    978
    979struct htt_data_tx_completion {
    980	union {
    981		u8 flags;
    982		struct {
    983			u8 status:3,
    984			   tid:4,
    985			   tid_invalid:1;
    986		} __packed;
    987	} __packed;
    988	u8 num_msdus;
    989	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
    990	__le16 msdus[]; /* variable length based on %num_msdus */
    991} __packed;
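
/*
 * Illustrative example (not upstream driver code): walking the
 * variable-length MSDU ID list that follows the fixed part of
 * struct htt_data_tx_completion. The callback is a hypothetical consumer
 * used only to show the layout.
 */
static inline void
htt_example_data_tx_completion_for_each(const struct htt_data_tx_completion *resp,
					void (*cb)(u16 msdu_id))
{
	int i;

	for (i = 0; i < resp->num_msdus; i++)
		cb(__le16_to_cpu(resp->msdus[i]));
}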
    992
    993#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK	GENMASK(15, 0)
    994#define HTT_TX_PPDU_DUR_INFO0_TID_MASK		GENMASK(20, 16)
    995
    996struct htt_data_tx_ppdu_dur {
    997	__le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
    998	__le32 tx_duration; /* in usecs */
    999} __packed;
   1000
   1001#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK	GENMASK(7, 0)
   1002
   1003struct htt_data_tx_compl_ppdu_dur {
   1004	__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
   1005	struct htt_data_tx_ppdu_dur ppdu_dur[];
   1006} __packed;
   1007
   1008struct htt_tx_compl_ind_base {
   1009	u32 hdr;
   1010	u16 payload[1/*or more*/];
   1011} __packed;
   1012
   1013struct htt_rc_tx_done_params {
   1014	u32 rate_code;
   1015	u32 rate_code_flags;
   1016	u32 flags;
   1017	u32 num_enqued; /* 1 for non-AMPDU */
   1018	u32 num_retries;
   1019	u32 num_failed; /* for AMPDU */
   1020	u32 ack_rssi;
   1021	u32 time_stamp;
   1022	u32 is_probe;
   1023};
   1024
   1025struct htt_rc_update {
   1026	u8 vdev_id;
   1027	__le16 peer_id;
   1028	u8 addr[6];
   1029	u8 num_elems;
   1030	u8 rsvd0;
   1031	struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
   1032} __packed;
   1033
   1034/* see htt_rx_indication for similar fields and descriptions */
   1035struct htt_rx_fragment_indication {
   1036	union {
   1037		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
   1038		struct {
   1039			u8 ext_tid:5,
   1040			   flush_valid:1;
   1041		} __packed;
   1042	} __packed;
   1043	__le16 peer_id;
   1044	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
   1045	__le16 fw_rx_desc_bytes;
   1046	__le16 rsvd0;
   1047
   1048	u8 fw_msdu_rx_desc[];
   1049} __packed;
   1050
   1051#define ATH10K_IEEE80211_EXTIV               BIT(5)
   1052#define ATH10K_IEEE80211_TKIP_MICLEN         8   /* trailing MIC */
   1053
   1054#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN     16
   1055
   1056#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
   1057#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
   1058#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
   1059#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5
   1060
   1061#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
   1062#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
   1063#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
   1064#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
   1065
   1066struct htt_rx_pn_ind {
   1067	__le16 peer_id;
   1068	u8 tid;
   1069	u8 seqno_start;
   1070	u8 seqno_end;
   1071	u8 pn_ie_count;
   1072	u8 reserved;
   1073	u8 pn_ies[];
   1074} __packed;
   1075
   1076struct htt_rx_offload_msdu {
   1077	__le16 msdu_len;
   1078	__le16 peer_id;
   1079	u8 vdev_id;
   1080	u8 tid;
   1081	u8 fw_desc;
   1082	u8 payload[];
   1083} __packed;
   1084
   1085struct htt_rx_offload_ind {
   1086	u8 reserved;
   1087	__le16 msdu_count;
   1088} __packed;
   1089
   1090struct htt_rx_in_ord_msdu_desc {
   1091	__le32 msdu_paddr;
   1092	__le16 msdu_len;
   1093	u8 fw_desc;
   1094	u8 reserved;
   1095} __packed;
   1096
   1097struct htt_rx_in_ord_msdu_desc_ext {
   1098	__le64 msdu_paddr;
   1099	__le16 msdu_len;
   1100	u8 fw_desc;
   1101	u8 reserved;
   1102} __packed;
   1103
   1104struct htt_rx_in_ord_ind {
   1105	u8 info;
   1106	__le16 peer_id;
   1107	u8 vdev_id;
   1108	u8 reserved;
   1109	__le16 msdu_count;
   1110	union {
   1111		struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
   1112		struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
   1113	} __packed;
   1114} __packed;
   1115
   1116#define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
   1117#define HTT_RX_IN_ORD_IND_INFO_TID_LSB		0
   1118#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK	0x00000020
   1119#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB	5
   1120#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK	0x00000040
   1121#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB		6
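
/*
 * Illustrative example (not upstream driver code): extracting the tid,
 * offload and frag bits from the info byte of an in-order rx indication
 * using the masks above.
 */
static inline void htt_example_rx_in_ord_info(const struct htt_rx_in_ord_ind *ind,
					      u8 *tid, bool *offload, bool *frag)
{
	*tid = (ind->info & HTT_RX_IN_ORD_IND_INFO_TID_MASK) >>
	       HTT_RX_IN_ORD_IND_INFO_TID_LSB;
	*offload = !!(ind->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	*frag = !!(ind->info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
}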
   1122
   1123/*
   1124 * target -> host test message definition
   1125 *
   1126 * The following field definitions describe the format of the test
   1127 * message sent from the target to the host.
   1128 * The message consists of a 4-octet header, followed by a variable
   1129 * number of 32-bit integer values, followed by a variable number
   1130 * of 8-bit character values.
   1131 *
   1132 * |31                         16|15           8|7            0|
   1133 * |-----------------------------------------------------------|
   1134 * |          num chars          |   num ints   |   msg type   |
   1135 * |-----------------------------------------------------------|
   1136 * |                           int 0                           |
   1137 * |-----------------------------------------------------------|
   1138 * |                           int 1                           |
   1139 * |-----------------------------------------------------------|
   1140 * |                            ...                            |
   1141 * |-----------------------------------------------------------|
   1142 * |    char 3    |    char 2    |    char 1    |    char 0    |
   1143 * |-----------------------------------------------------------|
   1144 * |              |              |      ...     |    char 4    |
   1145 * |-----------------------------------------------------------|
   1146 *   - MSG_TYPE
   1147 *     Bits 7:0
   1148 *     Purpose: identifies this as a test message
   1149 *     Value: HTT_MSG_TYPE_TEST
   1150 *   - NUM_INTS
   1151 *     Bits 15:8
   1152 *     Purpose: indicate how many 32-bit integers follow the message header
   1153 *   - NUM_CHARS
   1154 *     Bits 31:16
   1155 *     Purpose: indicate how many 8-bit characters follow the series of integers
   1156 */
   1157struct htt_rx_test {
   1158	u8 num_ints;
   1159	__le16 num_chars;
   1160
   1161	/* payload consists of 2 lists:
   1162	 *  a) num_ints * sizeof(__le32)
   1163	 *  b) num_chars * sizeof(u8) aligned to 4bytes
   1164	 */
   1165	u8 payload[];
   1166} __packed;
   1167
   1168static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
   1169{
   1170	return (__le32 *)rx_test->payload;
   1171}
   1172
   1173static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
   1174{
   1175	return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
   1176}
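
/*
 * Illustrative example (not upstream driver code): using the two accessors
 * above to walk the integer and character lists of a test message, per the
 * payload layout described in struct htt_rx_test.
 */
static inline void htt_example_rx_test_dump(struct htt_rx_test *rx_test)
{
	__le32 *ints = htt_rx_test_get_ints(rx_test);
	u8 *chars = htt_rx_test_get_chars(rx_test);
	int i;

	for (i = 0; i < rx_test->num_ints; i++)
		pr_debug("htt test int %d: %u\n", i, __le32_to_cpu(ints[i]));

	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
		pr_debug("htt test char %d: %c\n", i, chars[i]);
}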
   1177
   1178/*
   1179 * target -> host packet log message
   1180 *
   1181 * The following field definitions describe the format of the packet log
   1182 * message sent from the target to the host.
    1183 * The message consists of a 4-octet header, followed by a variable number
   1184 * of 32-bit character values.
   1185 *
   1186 * |31          24|23          16|15           8|7            0|
   1187 * |-----------------------------------------------------------|
   1188 * |              |              |              |   msg type   |
   1189 * |-----------------------------------------------------------|
   1190 * |                        payload                            |
   1191 * |-----------------------------------------------------------|
   1192 *   - MSG_TYPE
   1193 *     Bits 7:0
    1194 *     Purpose: identifies this as a packet log message
   1195 *     Value: HTT_MSG_TYPE_PACKETLOG
   1196 */
   1197struct htt_pktlog_msg {
   1198	u8 pad[3];
   1199	u8 payload[];
   1200} __packed;
   1201
   1202struct htt_dbg_stats_rx_reorder_stats {
   1203	/* Non QoS MPDUs received */
   1204	__le32 deliver_non_qos;
   1205
   1206	/* MPDUs received in-order */
   1207	__le32 deliver_in_order;
   1208
   1209	/* Flush due to reorder timer expired */
   1210	__le32 deliver_flush_timeout;
   1211
   1212	/* Flush due to move out of window */
   1213	__le32 deliver_flush_oow;
   1214
   1215	/* Flush due to DELBA */
   1216	__le32 deliver_flush_delba;
   1217
   1218	/* MPDUs dropped due to FCS error */
   1219	__le32 fcs_error;
   1220
   1221	/* MPDUs dropped due to monitor mode non-data packet */
   1222	__le32 mgmt_ctrl;
   1223
   1224	/* MPDUs dropped due to invalid peer */
   1225	__le32 invalid_peer;
   1226
   1227	/* MPDUs dropped due to duplication (non aggregation) */
   1228	__le32 dup_non_aggr;
   1229
   1230	/* MPDUs dropped due to processed before */
   1231	__le32 dup_past;
   1232
   1233	/* MPDUs dropped due to duplicate in reorder queue */
   1234	__le32 dup_in_reorder;
   1235
   1236	/* Reorder timeout happened */
   1237	__le32 reorder_timeout;
   1238
   1239	/* invalid bar ssn */
   1240	__le32 invalid_bar_ssn;
   1241
   1242	/* reorder reset due to bar ssn */
   1243	__le32 ssn_reset;
   1244};
   1245
   1246struct htt_dbg_stats_wal_tx_stats {
   1247	/* Num HTT cookies queued to dispatch list */
   1248	__le32 comp_queued;
   1249
   1250	/* Num HTT cookies dispatched */
   1251	__le32 comp_delivered;
   1252
   1253	/* Num MSDU queued to WAL */
   1254	__le32 msdu_enqued;
   1255
   1256	/* Num MPDU queue to WAL */
   1257	__le32 mpdu_enqued;
   1258
   1259	/* Num MSDUs dropped by WMM limit */
   1260	__le32 wmm_drop;
   1261
   1262	/* Num Local frames queued */
   1263	__le32 local_enqued;
   1264
   1265	/* Num Local frames done */
   1266	__le32 local_freed;
   1267
   1268	/* Num queued to HW */
   1269	__le32 hw_queued;
   1270
   1271	/* Num PPDU reaped from HW */
   1272	__le32 hw_reaped;
   1273
   1274	/* Num underruns */
   1275	__le32 underrun;
   1276
   1277	/* Num PPDUs cleaned up in TX abort */
   1278	__le32 tx_abort;
   1279
   1280	/* Num MPDUs requeued by SW */
   1281	__le32 mpdus_requeued;
   1282
   1283	/* excessive retries */
   1284	__le32 tx_ko;
   1285
   1286	/* data hw rate code */
   1287	__le32 data_rc;
   1288
   1289	/* Scheduler self triggers */
   1290	__le32 self_triggers;
   1291
   1292	/* frames dropped due to excessive sw retries */
   1293	__le32 sw_retry_failure;
   1294
   1295	/* illegal rate phy errors  */
   1296	__le32 illgl_rate_phy_err;
   1297
   1298	/* wal pdev continuous xretry */
   1299	__le32 pdev_cont_xretry;
   1300
   1301	/* wal pdev continuous xretry */
   1302	__le32 pdev_tx_timeout;
   1303
   1304	/* wal pdev resets  */
   1305	__le32 pdev_resets;
   1306
   1307	__le32 phy_underrun;
   1308
   1309	/* MPDU is more than txop limit */
   1310	__le32 txop_ovf;
   1311} __packed;
   1312
   1313struct htt_dbg_stats_wal_rx_stats {
   1314	/* Cnts any change in ring routing mid-ppdu */
   1315	__le32 mid_ppdu_route_change;
   1316
   1317	/* Total number of statuses processed */
   1318	__le32 status_rcvd;
   1319
   1320	/* Extra frags on rings 0-3 */
   1321	__le32 r0_frags;
   1322	__le32 r1_frags;
   1323	__le32 r2_frags;
   1324	__le32 r3_frags;
   1325
   1326	/* MSDUs / MPDUs delivered to HTT */
   1327	__le32 htt_msdus;
   1328	__le32 htt_mpdus;
   1329
   1330	/* MSDUs / MPDUs delivered to local stack */
   1331	__le32 loc_msdus;
   1332	__le32 loc_mpdus;
   1333
   1334	/* AMSDUs that have more MSDUs than the status ring size */
   1335	__le32 oversize_amsdu;
   1336
   1337	/* Number of PHY errors */
   1338	__le32 phy_errs;
   1339
   1340	/* Number of PHY errors drops */
   1341	__le32 phy_err_drop;
   1342
   1343	/* Number of mpdu errors - FCS, MIC, ENC etc. */
   1344	__le32 mpdu_errs;
   1345} __packed;
   1346
   1347struct htt_dbg_stats_wal_peer_stats {
   1348	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
   1349} __packed;
   1350
   1351struct htt_dbg_stats_wal_pdev_txrx {
   1352	struct htt_dbg_stats_wal_tx_stats tx_stats;
   1353	struct htt_dbg_stats_wal_rx_stats rx_stats;
   1354	struct htt_dbg_stats_wal_peer_stats peer_stats;
   1355} __packed;
   1356
   1357struct htt_dbg_stats_rx_rate_info {
   1358	__le32 mcs[10];
   1359	__le32 sgi[10];
   1360	__le32 nss[4];
   1361	__le32 stbc[10];
   1362	__le32 bw[3];
   1363	__le32 pream[6];
   1364	__le32 ldpc;
   1365	__le32 txbf;
   1366};
   1367
   1368/*
   1369 * htt_dbg_stats_status -
   1370 * present -     The requested stats have been delivered in full.
   1371 *               This indicates that either the stats information was contained
   1372 *               in its entirety within this message, or else this message
   1373 *               completes the delivery of the requested stats info that was
   1374 *               partially delivered through earlier STATS_CONF messages.
   1375 * partial -     The requested stats have been delivered in part.
   1376 *               One or more subsequent STATS_CONF messages with the same
   1377 *               cookie value will be sent to deliver the remainder of the
   1378 *               information.
   1379 * error -       The requested stats could not be delivered, for example due
   1380 *               to a shortage of memory to construct a message holding the
   1381 *               requested stats.
   1382 * invalid -     The requested stat type is either not recognized, or the
   1383 *               target is configured to not gather the stats type in question.
   1384 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   1385 * series_done - This special value indicates that no further stats info
   1386 *               elements are present within a series of stats info elems
   1387 *               (within a stats upload confirmation message).
   1388 */
   1389enum htt_dbg_stats_status {
   1390	HTT_DBG_STATS_STATUS_PRESENT     = 0,
   1391	HTT_DBG_STATS_STATUS_PARTIAL     = 1,
   1392	HTT_DBG_STATS_STATUS_ERROR       = 2,
   1393	HTT_DBG_STATS_STATUS_INVALID     = 3,
   1394	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
   1395};
   1396
   1397/*
   1398 * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
   1399 *
   1400 * The following field definitions describe the format of the HTT host
   1401 * to target frag_desc/msdu_ext bank configuration message.
    1402 * The message contains the base address and the min and max id of the
   1403 * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
   1404 * MSDU_EXT/FRAG_DESC.
    1405 * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr.
   1406 * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
   1407 * the hardware does the mapping/translation.
   1408 *
    1409 * The total number of banks that can be configured is 16.
   1410 *
    1411 * This should be called before any TX has been initiated by the HTT
   1412 *
   1413 * |31                         16|15           8|7   5|4       0|
   1414 * |------------------------------------------------------------|
   1415 * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
   1416 * |------------------------------------------------------------|
   1417 * |                     BANK0_BASE_ADDRESS                     |
   1418 * |------------------------------------------------------------|
   1419 * |                            ...                             |
   1420 * |------------------------------------------------------------|
   1421 * |                    BANK15_BASE_ADDRESS                     |
   1422 * |------------------------------------------------------------|
   1423 * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
   1424 * |------------------------------------------------------------|
   1425 * |                            ...                             |
   1426 * |------------------------------------------------------------|
   1427 * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
   1428 * |------------------------------------------------------------|
   1429 * Header fields:
   1430 *  - MSG_TYPE
   1431 *    Bits 7:0
   1432 *    Value: 0x6
   1433 *  - BANKx_BASE_ADDRESS
   1434 *    Bits 31:0
   1435 *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
   1436 *         bank physical/bus address.
   1437 *  - BANKx_MIN_ID
   1438 *    Bits 15:0
   1439 *    Purpose: Provide a mechanism to specify the min index that needs to
    1440 *          be mapped.
   1441 *  - BANKx_MAX_ID
   1442 *    Bits 31:16
   1443 *    Purpose: Provide a mechanism to specify the max index that needs to
    1444 *          be mapped.
   1445 */
   1446struct htt_frag_desc_bank_id {
   1447	__le16 bank_min_id;
   1448	__le16 bank_max_id;
   1449} __packed;
   1450
   1451/* real is 16 but it wouldn't fit in the max htt message size
   1452 * so we use a conservatively safe value for now
   1453 */
   1454#define HTT_FRAG_DESC_BANK_MAX 4
   1455
   1456#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK		0x03
   1457#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB			0
   1458#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP			BIT(2)
   1459#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID		BIT(3)
   1460#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK	BIT(4)
   1461#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB	4
   1462
   1463enum htt_q_depth_type {
   1464	HTT_Q_DEPTH_TYPE_BYTES = 0,
   1465	HTT_Q_DEPTH_TYPE_MSDUS = 1,
   1466};
   1467
   1468#define HTT_TX_Q_STATE_NUM_PEERS		(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
   1469						 TARGET_10_4_NUM_VDEVS)
   1470#define HTT_TX_Q_STATE_NUM_TIDS			8
   1471#define HTT_TX_Q_STATE_ENTRY_SIZE		1
   1472#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER		0
   1473
   1474/**
   1475 * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
   1476 *
   1477 * Defines host q state format and behavior. See htt_q_state.
   1478 *
   1479 * @record_size: Defines the size of each host q entry in bytes. In practice
   1480 *	however firmware (at least 10.4.3-00191) ignores this host
    1481 *	configuration value and uses a hardcoded value of 1.
   1482 * @record_multiplier: This is valid only when q depth type is MSDUs. It
   1483 *	defines the exponent for the power of 2 multiplication.
   1484 */
   1485struct htt_q_state_conf {
   1486	__le32 paddr;
   1487	__le16 num_peers;
   1488	__le16 num_tids;
   1489	u8 record_size;
   1490	u8 record_multiplier;
   1491	u8 pad[2];
   1492} __packed;
   1493
   1494struct htt_frag_desc_bank_cfg32 {
   1495	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
   1496	u8 num_banks;
   1497	u8 desc_size;
   1498	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
   1499	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
   1500	struct htt_q_state_conf q_state;
   1501} __packed;
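
/*
 * Illustrative example (not upstream driver code): filling the 32-bit bank
 * configuration message for a single bank, following the field
 * descriptions above. The paddr/min/max arguments are hypothetical inputs;
 * real setup must also populate q_state and the HTT command header.
 */
static inline void
htt_example_frag_desc_bank_cfg32(struct htt_frag_desc_bank_cfg32 *cfg,
				 u8 pdev_id, u32 bank_paddr,
				 u16 min_id, u16 max_id, u8 desc_size)
{
	cfg->info = (pdev_id << HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB) &
		    HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK;
	cfg->num_banks = 1;
	cfg->desc_size = desc_size;
	cfg->bank_base_addrs[0] = __cpu_to_le32(bank_paddr);
	cfg->bank_id[0].bank_min_id = __cpu_to_le16(min_id);
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(max_id);
}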
   1502
   1503struct htt_frag_desc_bank_cfg64 {
   1504	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
   1505	u8 num_banks;
   1506	u8 desc_size;
   1507	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
   1508	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
   1509	struct htt_q_state_conf q_state;
   1510} __packed;
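
/* Illustrative sketch only (not part of the driver): one way the 32-bit
 * variant of this message could be filled in for a single fragment
 * descriptor bank, following the field layout documented above.  The
 * function name and its arguments are hypothetical; the real setup is
 * done by the htt_send_frag_desc_bank_cfg tx op.
 */
static inline void
htt_example_fill_frag_desc_bank_cfg32(struct htt_frag_desc_bank_cfg32 *cfg,
				      u32 bank_paddr, u16 num_descs)
{
	cfg->info = 0; /* pdev id 0, no byte swap, no valid q state */
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(bank_paddr);
	cfg->bank_id[0].bank_min_id = __cpu_to_le16(0);
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(num_descs - 1);
}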
   1511
   1512#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
   1513#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
   1514#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
   1515#define HTT_TX_Q_STATE_ENTRY_EXP_MASK		0xc0
   1516#define HTT_TX_Q_STATE_ENTRY_EXP_LSB		6
   1517
   1518/**
   1519 * htt_q_state - shared between host and firmware via DMA
   1520 *
   1521 * This structure is used for the host to expose its software queue state to
   1522 * firmware so that its rate control can schedule fetch requests for optimized
   1523 * performance. This is most notably used for MU-MIMO aggregation when multiple
   1524 * MU clients are connected.
   1525 *
   1526 * @count: Each element defines the host queue depth. When q depth type was
   1527 *	configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
   1528 *	FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
   1529 *	HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
   1530 *	HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
   1531 *	record_multiplier (see htt_q_state_conf).
   1532 * @map: Used by firmware to quickly check which host queues are not empty. It
   1533 *	is simply a bitmap of the non-empty queues.
   1534 * @seq: Used by firmware to quickly check if the host queues were updated
   1535 *	since it last checked.
   1536 *
   1537 * FIXME: Is the q_state map[] size calculation really correct?
   1538 */
   1539struct htt_q_state {
   1540	u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
   1541	u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
   1542	__le32 seq;
   1543} __packed;
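
/* Illustrative sketch only: decoding a single htt_q_state count byte under
 * the HTT_Q_DEPTH_TYPE_BYTES interpretation documented above, i.e. roughly
 * FACTOR * 128 * 8^EXP bytes.  The helper name is hypothetical and the
 * driver does not use it.
 */
static inline u32 htt_example_q_state_count_to_bytes(u8 count)
{
	u32 factor = (count & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
		     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
	u32 exp = (count & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
		  HTT_TX_Q_STATE_ENTRY_EXP_LSB;

	return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
}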
   1544
   1545#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK	0x0fff
   1546#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB	0
   1547#define HTT_TX_FETCH_RECORD_INFO_TID_MASK	0xf000
   1548#define HTT_TX_FETCH_RECORD_INFO_TID_LSB	12
   1549
   1550struct htt_tx_fetch_record {
   1551	__le16 info; /* HTT_TX_FETCH_RECORD_INFO_ */
   1552	__le16 num_msdus;
   1553	__le32 num_bytes;
   1554} __packed;
   1555
   1556struct htt_tx_fetch_ind {
   1557	u8 pad0;
   1558	__le16 fetch_seq_num;
   1559	__le32 token;
   1560	__le16 num_resp_ids;
   1561	__le16 num_records;
   1562	union {
   1563		/* ath10k_htt_get_tx_fetch_ind_resp_ids() */
   1564		DECLARE_FLEX_ARRAY(__le32, resp_ids);
   1565		DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
   1566	};
   1567} __packed;
   1568
   1569static inline void *
   1570ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
   1571{
   1572	return (void *)&ind->records[le16_to_cpu(ind->num_records)];
   1573}
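
/* Illustrative sketch only: how the peer id and TID are unpacked from a tx
 * fetch record's info word using the masks above.  These helper names are
 * hypothetical and not driver API.
 */
static inline u16
htt_example_tx_fetch_record_peer_id(const struct htt_tx_fetch_record *record)
{
	return (__le16_to_cpu(record->info) &
		HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK) >>
	       HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB;
}

static inline u8
htt_example_tx_fetch_record_tid(const struct htt_tx_fetch_record *record)
{
	return (__le16_to_cpu(record->info) &
		HTT_TX_FETCH_RECORD_INFO_TID_MASK) >>
	       HTT_TX_FETCH_RECORD_INFO_TID_LSB;
}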
   1574
   1575struct htt_tx_fetch_resp {
   1576	u8 pad0;
   1577	__le16 resp_id;
   1578	__le16 fetch_seq_num;
   1579	__le16 num_records;
   1580	__le32 token;
   1581	struct htt_tx_fetch_record records[];
   1582} __packed;
   1583
   1584struct htt_tx_fetch_confirm {
   1585	u8 pad0;
   1586	__le16 num_resp_ids;
   1587	__le32 resp_ids[];
   1588} __packed;
   1589
   1590enum htt_tx_mode_switch_mode {
   1591	HTT_TX_MODE_SWITCH_PUSH = 0,
   1592	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
   1593};
   1594
   1595#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE		BIT(0)
   1596#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK	0xfffe
   1597#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB	1
   1598
   1599#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK		0x0003
   1600#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB		0
   1601#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK	0xfffc
   1602#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB	2
   1603
   1604#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK	0x0fff
   1605#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB	0
   1606#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK	0xf000
   1607#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB		12
   1608
   1609struct htt_tx_mode_switch_record {
   1610	__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
   1611	__le16 num_max_msdus;
   1612} __packed;
   1613
   1614struct htt_tx_mode_switch_ind {
   1615	u8 pad0;
   1616	__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
   1617	__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
   1618	u8 pad1[2];
   1619	struct htt_tx_mode_switch_record records[];
   1620} __packed;
   1621
   1622struct htt_channel_change {
   1623	u8 pad[3];
   1624	__le32 freq;
   1625	__le32 center_freq1;
   1626	__le32 center_freq2;
   1627	__le32 phymode;
   1628} __packed;
   1629
   1630struct htt_per_peer_tx_stats_ind {
   1631	__le32	succ_bytes;
   1632	__le32  retry_bytes;
   1633	__le32  failed_bytes;
   1634	u8	ratecode;
   1635	u8	flags;
   1636	__le16	peer_id;
   1637	__le16  succ_pkts;
   1638	__le16	retry_pkts;
   1639	__le16	failed_pkts;
   1640	__le16	tx_duration;
   1641	__le32	reserved1;
   1642	__le32	reserved2;
   1643} __packed;
   1644
   1645struct htt_peer_tx_stats {
   1646	u8 num_ppdu;
   1647	u8 ppdu_len;
   1648	u8 version;
   1649	u8 payload[];
   1650} __packed;
   1651
   1652#define ATH10K_10_2_TX_STATS_OFFSET	136
   1653#define PEER_STATS_FOR_NO_OF_PPDUS	4
   1654
   1655struct ath10k_10_2_peer_tx_stats {
   1656	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
   1657	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
   1658	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
   1659	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
   1660	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
   1661	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
   1662	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
   1663	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
   1664	__le32 tx_duration;
   1665	u8 tx_ppdu_cnt;
   1666	u8 peer_id;
   1667} __packed;
   1668
   1669union htt_rx_pn_t {
   1670	/* WEP: 24-bit PN */
   1671	u32 pn24;
   1672
   1673	/* TKIP or CCMP: 48-bit PN */
   1674	u64 pn48;
   1675
   1676	/* WAPI: 128-bit PN */
   1677	u64 pn128[2];
   1678};
   1679
   1680struct htt_cmd {
   1681	struct htt_cmd_hdr hdr;
   1682	union {
   1683		struct htt_ver_req ver_req;
   1684		struct htt_mgmt_tx_desc mgmt_tx;
   1685		struct htt_data_tx_desc data_tx;
   1686		struct htt_rx_ring_setup_32 rx_setup_32;
   1687		struct htt_rx_ring_setup_64 rx_setup_64;
   1688		struct htt_stats_req stats_req;
   1689		struct htt_oob_sync_req oob_sync_req;
   1690		struct htt_aggr_conf aggr_conf;
   1691		struct htt_aggr_conf_v2 aggr_conf_v2;
   1692		struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
   1693		struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
   1694		struct htt_tx_fetch_resp tx_fetch_resp;
   1695	};
   1696} __packed;
   1697
   1698struct htt_resp {
   1699	struct htt_resp_hdr hdr;
   1700	union {
   1701		struct htt_ver_resp ver_resp;
   1702		struct htt_mgmt_tx_completion mgmt_tx_completion;
   1703		struct htt_data_tx_completion data_tx_completion;
   1704		struct htt_rx_indication rx_ind;
   1705		struct htt_rx_indication_hl rx_ind_hl;
   1706		struct htt_rx_fragment_indication rx_frag_ind;
   1707		struct htt_rx_peer_map peer_map;
   1708		struct htt_rx_peer_unmap peer_unmap;
   1709		struct htt_rx_flush rx_flush;
   1710		struct htt_rx_addba rx_addba;
   1711		struct htt_rx_delba rx_delba;
   1712		struct htt_security_indication security_indication;
   1713		struct htt_rc_update rc_update;
   1714		struct htt_rx_test rx_test;
   1715		struct htt_pktlog_msg pktlog_msg;
   1716		struct htt_rx_pn_ind rx_pn_ind;
   1717		struct htt_rx_offload_ind rx_offload_ind;
   1718		struct htt_rx_in_ord_ind rx_in_ord_ind;
   1719		struct htt_tx_fetch_ind tx_fetch_ind;
   1720		struct htt_tx_fetch_confirm tx_fetch_confirm;
   1721		struct htt_tx_mode_switch_ind tx_mode_switch_ind;
   1722		struct htt_channel_change chan_change;
   1723		struct htt_peer_tx_stats peer_tx_stats;
   1724	};
   1725} __packed;
   1726
   1727/*** host side structures follow ***/
   1728
   1729struct htt_tx_done {
   1730	u16 msdu_id;
   1731	u16 status;
   1732	u8 ack_rssi;
   1733};
   1734
   1735enum htt_tx_compl_state {
   1736	HTT_TX_COMPL_STATE_NONE,
   1737	HTT_TX_COMPL_STATE_ACK,
   1738	HTT_TX_COMPL_STATE_NOACK,
   1739	HTT_TX_COMPL_STATE_DISCARD,
   1740};
   1741
   1742struct htt_peer_map_event {
   1743	u8 vdev_id;
   1744	u16 peer_id;
   1745	u8 addr[ETH_ALEN];
   1746};
   1747
   1748struct htt_peer_unmap_event {
   1749	u16 peer_id;
   1750};
   1751
   1752struct ath10k_htt_txbuf_32 {
   1753	struct htt_data_tx_desc_frag frags[2];
   1754	struct ath10k_htc_hdr htc_hdr;
   1755	struct htt_cmd_hdr cmd_hdr;
   1756	struct htt_data_tx_desc cmd_tx;
   1757} __packed __aligned(4);
   1758
   1759struct ath10k_htt_txbuf_64 {
   1760	struct htt_data_tx_desc_frag frags[2];
   1761	struct ath10k_htc_hdr htc_hdr;
   1762	struct htt_cmd_hdr cmd_hdr;
   1763	struct htt_data_tx_desc_64 cmd_tx;
   1764} __packed __aligned(4);
   1765
   1766struct ath10k_htt {
   1767	struct ath10k *ar;
   1768	enum ath10k_htc_ep_id eid;
   1769
   1770	struct sk_buff_head rx_indication_head;
   1771
   1772	u8 target_version_major;
   1773	u8 target_version_minor;
   1774	struct completion target_version_received;
   1775	u8 max_num_amsdu;
   1776	u8 max_num_ampdu;
   1777
   1778	const enum htt_t2h_msg_type *t2h_msg_types;
   1779	u32 t2h_msg_types_max;
   1780
   1781	struct {
   1782		/*
   1783		 * Ring of network buffer objects - This ring is
   1784		 * used exclusively by the host SW. This ring
   1785		 * mirrors the dev_addrs_ring that is shared
   1786		 * between the host SW and the MAC HW. The host SW
   1787		 * uses this netbufs ring to locate the network
   1788		 * buffer objects whose data buffers the HW has
   1789		 * filled.
   1790		 */
   1791		struct sk_buff **netbufs_ring;
   1792
   1793		/* This is used only with firmware supporting IN_ORD_IND.
   1794		 *
   1795		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
   1796		 * buffer ring from which buffer addresses are copied by the
   1797		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
   1798		 * pointing to specific (re-ordered) buffers.
   1799		 *
   1800		 * FIXME: With kernel generic hashing functions there's a lot
   1801		 * of hash collisions for sk_buffs.
   1802		 */
   1803		bool in_ord_rx;
   1804		DECLARE_HASHTABLE(skb_table, 4);
   1805
   1806		/*
   1807		 * Ring of buffer addresses -
   1808		 * This ring holds the "physical" device address of the
   1809		 * rx buffers the host SW provides for the MAC HW to
   1810		 * fill.
   1811		 */
   1812		union {
   1813			__le64 *paddrs_ring_64;
   1814			__le32 *paddrs_ring_32;
   1815		};
   1816
   1817		/*
   1818		 * Base address of ring, as a "physical" device address
   1819		 * rather than a CPU address.
   1820		 */
   1821		dma_addr_t base_paddr;
   1822
   1823		/* how many elems in the ring (power of 2) */
   1824		int size;
   1825
   1826		/* size - 1 */
   1827		unsigned int size_mask;
   1828
   1829		/* how many rx buffers to keep in the ring */
   1830		int fill_level;
   1831
   1832		/* how many rx buffers (full+empty) are in the ring */
   1833		int fill_cnt;
   1834
   1835		/*
   1836		 * alloc_idx - where HTT SW has deposited empty buffers
   1837		 * This is allocated in consistent mem, so that the FW can
   1838		 * read this variable, and program the HW's FW_IDX reg with
   1839		 * the value of this shadow register.
   1840		 */
   1841		struct {
   1842			__le32 *vaddr;
   1843			dma_addr_t paddr;
   1844		} alloc_idx;
   1845
   1846		/* where HTT SW has processed bufs filled by rx MAC DMA */
   1847		struct {
   1848			unsigned int msdu_payld;
   1849		} sw_rd_idx;
   1850
   1851		/*
   1852		 * refill_retry_timer - timer triggered when the ring is
   1853		 * not refilled to the level expected
   1854		 */
   1855		struct timer_list refill_retry_timer;
   1856
   1857		/* Protects access to all rx ring buffer state variables */
   1858		spinlock_t lock;
   1859	} rx_ring;
   1860
   1861	unsigned int prefetch_len;
   1862
   1863	/* Protects access to pending_tx, num_pending_tx */
   1864	spinlock_t tx_lock;
   1865	int max_num_pending_tx;
   1866	int num_pending_tx;
   1867	int num_pending_mgmt_tx;
   1868	struct idr pending_tx;
   1869	wait_queue_head_t empty_tx_wq;
   1870
   1871	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
   1872	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
   1873
   1874	/* set if host-fw communication goes haywire
   1875	 * used to avoid further failures
   1876	 */
   1877	bool rx_confused;
   1878	atomic_t num_mpdus_ready;
   1879
   1880	/* This is used to group tx/rx completions separately and process them
   1881	 * in batches to reduce cache stalls
   1882	 */
   1883	struct sk_buff_head rx_msdus_q;
   1884	struct sk_buff_head rx_in_ord_compl_q;
   1885	struct sk_buff_head tx_fetch_ind_q;
   1886
   1887	/* rx_status template */
   1888	struct ieee80211_rx_status rx_status;
   1889
   1890	struct {
   1891		dma_addr_t paddr;
   1892		union {
   1893			struct htt_msdu_ext_desc *vaddr_desc_32;
   1894			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
   1895		};
   1896		size_t size;
   1897	} frag_desc;
   1898
   1899	struct {
   1900		dma_addr_t paddr;
   1901		union {
   1902			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
   1903			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
   1904		};
   1905		size_t size;
   1906	} txbuf;
   1907
   1908	struct {
   1909		bool enabled;
   1910		struct htt_q_state *vaddr;
   1911		dma_addr_t paddr;
   1912		u16 num_push_allowed;
   1913		u16 num_peers;
   1914		u16 num_tids;
   1915		enum htt_tx_mode_switch_mode mode;
   1916		enum htt_q_depth_type type;
   1917	} tx_q_state;
   1918
   1919	bool tx_mem_allocated;
   1920	const struct ath10k_htt_tx_ops *tx_ops;
   1921	const struct ath10k_htt_rx_ops *rx_ops;
   1922	bool disable_tx_comp;
   1923	bool bundle_tx;
   1924	struct sk_buff_head tx_req_head;
   1925	struct sk_buff_head tx_complete_head;
   1926};
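
/* Illustrative sketch only: the index arithmetic implied by the power-of-2
 * rx ring size and size_mask documented in struct ath10k_htt above.  The
 * helper name is hypothetical; the real ring management lives in htt_rx.c.
 */
static inline int ath10k_htt_example_rx_ring_next_idx(struct ath10k_htt *htt,
						      int idx)
{
	return (idx + 1) & htt->rx_ring.size_mask;
}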
   1927
   1928struct ath10k_htt_tx_ops {
   1929	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
   1930	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
   1931	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
   1932	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
   1933	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
   1934		      struct sk_buff *msdu);
   1935	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
   1936	void (*htt_free_txbuff)(struct ath10k_htt *htt);
   1937	int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
   1938				    u8 max_subfrms_ampdu,
   1939				    u8 max_subfrms_amsdu);
   1940	void (*htt_flush_tx)(struct ath10k_htt *htt);
   1941};
   1942
   1943static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
   1944{
   1945	if (!htt->tx_ops->htt_send_rx_ring_cfg)
   1946		return -EOPNOTSUPP;
   1947
   1948	return htt->tx_ops->htt_send_rx_ring_cfg(htt);
   1949}
   1950
   1951static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
   1952{
   1953	if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
   1954		return -EOPNOTSUPP;
   1955
   1956	return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
   1957}
   1958
   1959static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
   1960{
   1961	if (!htt->tx_ops->htt_alloc_frag_desc)
   1962		return -EOPNOTSUPP;
   1963
   1964	return htt->tx_ops->htt_alloc_frag_desc(htt);
   1965}
   1966
   1967static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
   1968{
   1969	if (htt->tx_ops->htt_free_frag_desc)
   1970		htt->tx_ops->htt_free_frag_desc(htt);
   1971}
   1972
   1973static inline int ath10k_htt_tx(struct ath10k_htt *htt,
   1974				enum ath10k_hw_txrx_mode txmode,
   1975				struct sk_buff *msdu)
   1976{
   1977	return htt->tx_ops->htt_tx(htt, txmode, msdu);
   1978}
   1979
   1980static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
   1981{
   1982	if (htt->tx_ops->htt_flush_tx)
   1983		htt->tx_ops->htt_flush_tx(htt);
   1984}
   1985
   1986static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
   1987{
   1988	if (!htt->tx_ops->htt_alloc_txbuff)
   1989		return -EOPNOTSUPP;
   1990
   1991	return htt->tx_ops->htt_alloc_txbuff(htt);
   1992}
   1993
   1994static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
   1995{
   1996	if (htt->tx_ops->htt_free_txbuff)
   1997		htt->tx_ops->htt_free_txbuff(htt);
   1998}
   1999
   2000static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
   2001					      u8 max_subfrms_ampdu,
   2002					      u8 max_subfrms_amsdu)
   2003
   2004{
   2005	if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
   2006		return -EOPNOTSUPP;
   2007
   2008	return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
   2009						 max_subfrms_ampdu,
   2010						 max_subfrms_amsdu);
   2011}
   2012
   2013struct ath10k_htt_rx_ops {
   2014	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
   2015	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
   2016	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
   2017				    int idx);
   2018	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
   2019	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
   2020	bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
   2021					struct htt_rx_fragment_indication *rx,
   2022					struct sk_buff *skb);
   2023};
   2024
   2025static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
   2026{
   2027	if (!htt->rx_ops->htt_get_rx_ring_size)
   2028		return 0;
   2029
   2030	return htt->rx_ops->htt_get_rx_ring_size(htt);
   2031}
   2032
   2033static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
   2034						 void *vaddr)
   2035{
   2036	if (htt->rx_ops->htt_config_paddrs_ring)
   2037		htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
   2038}
   2039
   2040static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
   2041					      dma_addr_t paddr,
   2042					      int idx)
   2043{
   2044	if (htt->rx_ops->htt_set_paddrs_ring)
   2045		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
   2046}
   2047
   2048static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
   2049{
   2050	if (!htt->rx_ops->htt_get_vaddr_ring)
   2051		return NULL;
   2052
   2053	return htt->rx_ops->htt_get_vaddr_ring(htt);
   2054}
   2055
   2056static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
   2057{
   2058	if (htt->rx_ops->htt_reset_paddrs_ring)
   2059		htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
   2060}
   2061
   2062static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
   2063						  struct htt_rx_fragment_indication *rx,
   2064						  struct sk_buff *skb)
   2065{
   2066	if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
   2067		return true;
   2068
   2069	return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
   2070}
   2071
   2072/* the driver strongly assumes that the rx header status is 64 bytes long,
   2073 * so all possible rx_desc structures must respect this assumption.
   2074 */
   2075#define RX_HTT_HDR_STATUS_LEN 64
   2076
   2077/* The rx descriptor structure layout is programmed via rx ring setup
   2078 * so that FW knows how to transfer the rx descriptor to the host.
   2079 * Unfortunately, though, QCA6174's firmware does not currently behave correctly
   2080 * when the rx descriptor structure layout differs from the one it expects
   2081 * (even if the new layout is correctly programmed during the rx ring setup).
   2082 * Therefore we must keep two different memory layouts, abstract the rx descriptor
   2083 * representation and use the ath10k_htt_rx_desc_ops callbacks
   2084 * for correctly accessing rx descriptor data.
   2085 */
   2086
   2087/* base struct used for abstracting the rx descriptor representation */
   2088struct htt_rx_desc {
   2089	union {
   2090		/* This field is filled on the host using the msdu buffer
   2091		 * from htt_rx_indication
   2092		 */
   2093		struct fw_rx_desc_base fw_desc;
   2094		u32 pad;
   2095	} __packed;
   2096} __packed;
   2097
   2098/* rx descriptor for wcn3990 and possibly extensible for newer cards
   2099 * Buffers like this are placed on the rx ring.
   2100 */
   2101struct htt_rx_desc_v2 {
   2102	struct htt_rx_desc base;
   2103	struct {
   2104		struct rx_attention attention;
   2105		struct rx_frag_info frag_info;
   2106		struct rx_mpdu_start mpdu_start;
   2107		struct rx_msdu_start msdu_start;
   2108		struct rx_msdu_end msdu_end;
   2109		struct rx_mpdu_end mpdu_end;
   2110		struct rx_ppdu_start ppdu_start;
   2111		struct rx_ppdu_end ppdu_end;
   2112	} __packed;
   2113	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
   2114	u8 msdu_payload[];
   2115};
   2116
   2117/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0 to make sure their
   2118 * firmware works correctly. We keep a single rx descriptor layout for all three
   2119 * families of cards because, in testing, it proved to be the most stable solution;
   2120 * e.g. a dedicated rx descriptor used only for QCA6174 occasionally caused
   2121 * firmware crashes.
   2122 * Buffers like this are placed on the rx ring.
   2123 */
   2124struct htt_rx_desc_v1 {
   2125	struct htt_rx_desc base;
   2126	struct {
   2127		struct rx_attention attention;
   2128		struct rx_frag_info_v1 frag_info;
   2129		struct rx_mpdu_start mpdu_start;
   2130		struct rx_msdu_start_v1 msdu_start;
   2131		struct rx_msdu_end_v1 msdu_end;
   2132		struct rx_mpdu_end mpdu_end;
   2133		struct rx_ppdu_start ppdu_start;
   2134		struct rx_ppdu_end_v1 ppdu_end;
   2135	} __packed;
   2136	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
   2137	u8 msdu_payload[];
   2138};
   2139
   2140/* rx_desc abstraction */
   2141struct ath10k_htt_rx_desc_ops {
   2142	/* These fields are mandatory; they must be specified in every instance */
   2143
   2144	/* sizeof() of the rx_desc structure used by this hw */
   2145	size_t rx_desc_size;
   2146
   2147	/* offset of msdu_payload inside the rx_desc structure used by this hw */
   2148	size_t rx_desc_msdu_payload_offset;
   2149
   2150	/* These fields are optional.
   2151	 * When a field is not provided the default implementation gets used
   2152	 * (see the ath10k_rx_desc_* operations below for more info about the defaults)
   2153	 */
   2154	bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
   2155	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
   2156
   2157	/* Safely cast from a void* buffer containing an rx descriptor
   2158	 * to the proper rx_desc structure
   2159	 */
   2160	struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
   2161
   2162	void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
   2163	struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
   2164	struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
   2165	struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
   2166	struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
   2167	struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
   2168	struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
   2169	struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
   2170	struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
   2171	u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
   2172	u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
   2173};
   2174
   2175extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
   2176extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
   2177extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
   2178
   2179static inline int
   2180ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2181{
   2182	if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
   2183		return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
   2184	return 0;
   2185}
   2186
   2187static inline bool
   2188ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2189{
   2190	if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
   2191		return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
   2192	return false;
   2193}
   2194
   2195/* The default implementation of all these getters uses the old rx_desc layout
   2196 * (htt_rx_desc_v1), so that it is easier to define the ath10k_htt_rx_desc_ops
   2197 * instances. However, if new wireless cards must be supported, it would probably
   2198 * be better to switch the default implementation to the new rx_desc, since this
   2199 * would make the extension easier.
   2200 */
   2201static inline struct htt_rx_desc *
   2202ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw,	void *buff)
   2203{
   2204	if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
   2205		return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
   2206	return &((struct htt_rx_desc_v1 *)buff)->base;
   2207}
   2208
   2209static inline void
   2210ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
   2211			       struct htt_rx_ring_rx_desc_offsets *off)
   2212{
   2213	if (hw->rx_desc_ops->rx_desc_get_offsets) {
   2214		hw->rx_desc_ops->rx_desc_get_offsets(off);
   2215	} else {
   2216#define	desc_offset(x) (offsetof(struct	htt_rx_desc_v1, x)	/ 4)
   2217		off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
   2218		off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
   2219		off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
   2220		off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
   2221		off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
   2222		off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
   2223		off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
   2224		off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
   2225		off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
   2226		off->frag_info_offset =	__cpu_to_le16(desc_offset(frag_info));
   2227#undef desc_offset
   2228	}
   2229}
   2230
   2231static inline struct rx_attention *
   2232ath10k_htt_rx_desc_get_attention(struct	ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2233{
   2234	struct htt_rx_desc_v1 *rx_desc;
   2235
   2236	if (hw->rx_desc_ops->rx_desc_get_attention)
   2237		return hw->rx_desc_ops->rx_desc_get_attention(rxd);
   2238
   2239	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2240	return &rx_desc->attention;
   2241}
   2242
   2243static inline struct rx_frag_info_common *
   2244ath10k_htt_rx_desc_get_frag_info(struct	ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2245{
   2246	struct htt_rx_desc_v1 *rx_desc;
   2247
   2248	if (hw->rx_desc_ops->rx_desc_get_frag_info)
   2249		return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
   2250
   2251	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2252	return &rx_desc->frag_info.common;
   2253}
   2254
   2255static inline struct rx_mpdu_start *
   2256ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2257{
   2258	struct htt_rx_desc_v1 *rx_desc;
   2259
   2260	if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
   2261		return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
   2262
   2263	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2264	return &rx_desc->mpdu_start;
   2265}
   2266
   2267static inline struct rx_mpdu_end *
   2268ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params	*hw, struct htt_rx_desc	*rxd)
   2269{
   2270	struct htt_rx_desc_v1 *rx_desc;
   2271
   2272	if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
   2273		return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
   2274
   2275	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2276	return &rx_desc->mpdu_end;
   2277}
   2278
   2279static inline struct rx_msdu_start_common *
   2280ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2281{
   2282	struct htt_rx_desc_v1 *rx_desc;
   2283
   2284	if (hw->rx_desc_ops->rx_desc_get_msdu_start)
   2285		return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
   2286
   2287	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2288	return &rx_desc->msdu_start.common;
   2289}
   2290
   2291static inline struct rx_msdu_end_common	*
   2292ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params	*hw, struct htt_rx_desc	*rxd)
   2293{
   2294	struct htt_rx_desc_v1 *rx_desc;
   2295
   2296	if (hw->rx_desc_ops->rx_desc_get_msdu_end)
   2297		return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
   2298
   2299	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2300	return &rx_desc->msdu_end.common;
   2301}
   2302
   2303static inline struct rx_ppdu_start *
   2304ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2305{
   2306	struct htt_rx_desc_v1 *rx_desc;
   2307
   2308	if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
   2309		return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
   2310
   2311	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2312	return &rx_desc->ppdu_start;
   2313}
   2314
   2315static inline struct rx_ppdu_end_common	*
   2316ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params	*hw, struct htt_rx_desc	*rxd)
   2317{
   2318	struct htt_rx_desc_v1 *rx_desc;
   2319
   2320	if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
   2321		return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
   2322
   2323	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2324	return &rx_desc->ppdu_end.common;
   2325}
   2326
   2327static inline u8 *
   2328ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
   2329{
   2330	struct htt_rx_desc_v1 *rx_desc;
   2331
   2332	if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
   2333		return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
   2334
   2335	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2336	return rx_desc->rx_hdr_status;
   2337}
   2338
   2339static inline u8 *
   2340ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct	htt_rx_desc *rxd)
   2341{
   2342	struct htt_rx_desc_v1 *rx_desc;
   2343
   2344	if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
   2345		return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
   2346
   2347	rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
   2348	return rx_desc->msdu_payload;
   2349}
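
/* Illustrative sketch only: how rx descriptor contents are meant to be
 * reached through the ops abstraction above rather than through a
 * hard-coded layout.  The helper name is hypothetical and not driver API.
 */
static inline u8 *
ath10k_htt_example_rx_desc_payload(struct ath10k_hw_params *hw, void *buff)
{
	struct htt_rx_desc *rxd = ath10k_htt_rx_desc_from_raw_buffer(hw, buff);

	return ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
}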
   2350
   2351#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK           0x00000fff
   2352#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB            0
   2353#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK         0x00001000
   2354#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB          12
   2355#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
   2356#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB  13
   2357#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK       0x00010000
   2358#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB        16
   2359#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK        0x01fe0000
   2360#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB         17
   2361
   2362struct htt_rx_desc_base_hl {
   2363	__le32 info; /* HTT_RX_DESC_HL_INFO_ */
   2364};
   2365
   2366struct htt_rx_chan_info {
   2367	__le16 primary_chan_center_freq_mhz;
   2368	__le16 contig_chan1_center_freq_mhz;
   2369	__le16 contig_chan2_center_freq_mhz;
   2370	u8 phy_mode;
   2371	u8 reserved;
   2372} __packed;
   2373
   2374#define HTT_RX_DESC_ALIGN 8
   2375
   2376#define HTT_MAC_ADDR_LEN 6
   2377
   2378/*
   2379 * FIX THIS
   2380 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
   2381 * rounded up to a cache line size.
   2382 */
   2383#define HTT_RX_BUF_SIZE 2048
   2384
   2385/* The HTT_RX_MSDU_SIZE can't be statically computed anymore,
   2386 * because it depends on the underlying device rx_desc representation
   2387 */
   2388static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
   2389{
   2390	return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
   2391}
   2392
   2393/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
   2394 * aggregated traffic more nicely.
   2395 */
   2396#define ATH10K_HTT_MAX_NUM_REFILL 100
   2397
   2398/*
   2399 * DMA_MAP expects the buffer to be an integral number of cache lines.
   2400 * Rather than checking the actual cache line size, this code makes a
   2401 * conservative estimate of what the cache line size could be.
   2402 */
   2403#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
   2404#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
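
/* Illustrative sketch only: rounding a buffer length up to the conservative
 * cache line estimate above before DMA-mapping it.  The helper name is
 * hypothetical and not driver API.
 */
static inline int htt_example_cacheline_pad(int len)
{
	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
	       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}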
   2405
   2406/* These values are the defaults in most firmware revisions and apparently are
   2407 * a sweet spot performance-wise.
   2408 */
   2409#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
   2410#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
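
/* Illustrative sketch only: applying the defaults above through the
 * aggregation config tx op wrapper defined earlier in this header.  The
 * helper name is hypothetical and not driver API.
 */
static inline int htt_example_apply_aggr_defaults(struct ath10k_htt *htt)
{
	return ath10k_htt_h2t_aggr_cfg_msg(htt,
					   ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT,
					   ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT);
}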
   2411
   2412int ath10k_htt_connect(struct ath10k_htt *htt);
   2413int ath10k_htt_init(struct ath10k *ar);
   2414int ath10k_htt_setup(struct ath10k_htt *htt);
   2415
   2416int ath10k_htt_tx_start(struct ath10k_htt *htt);
   2417void ath10k_htt_tx_stop(struct ath10k_htt *htt);
   2418void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
   2419void ath10k_htt_tx_free(struct ath10k_htt *htt);
   2420
   2421int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
   2422int ath10k_htt_rx_ring_refill(struct ath10k *ar);
   2423void ath10k_htt_rx_free(struct ath10k_htt *htt);
   2424
   2425void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
   2426void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
   2427bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
   2428int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
   2429int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
   2430			     u64 cookie);
   2431void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
   2432int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
   2433			     __le32 token,
   2434			     __le16 fetch_seq_num,
   2435			     struct htt_tx_fetch_record *records,
   2436			     size_t num_records);
   2437void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
   2438
   2439void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
   2440			      struct ieee80211_txq *txq);
   2441void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
   2442			      struct ieee80211_txq *txq);
   2443void ath10k_htt_tx_txq_sync(struct ath10k *ar);
   2444void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
   2445int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
   2446void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
   2447int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
   2448				   bool is_presp);
   2449
   2450int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
   2451void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
   2452int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
   2453void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
   2454					     struct sk_buff *skb);
   2455int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
   2456int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
   2457void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
   2458void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
   2459#endif