cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hns3_enet.h (18622B)


      1/* SPDX-License-Identifier: GPL-2.0+ */
      2// Copyright (c) 2016-2017 Hisilicon Limited.
      3
      4#ifndef __HNS3_ENET_H
      5#define __HNS3_ENET_H
      6
      7#include <linux/dim.h>
      8#include <linux/if_vlan.h>
      9#include <net/page_pool.h>
     10#include <asm/barrier.h>
     11
     12#include "hnae3.h"
     13
     14struct iphdr;
     15struct ipv6hdr;
     16
     17enum hns3_nic_state {
     18	HNS3_NIC_STATE_TESTING,
     19	HNS3_NIC_STATE_RESETTING,
     20	HNS3_NIC_STATE_INITED,
     21	HNS3_NIC_STATE_DOWN,
     22	HNS3_NIC_STATE_DISABLED,
     23	HNS3_NIC_STATE_REMOVING,
     24	HNS3_NIC_STATE_SERVICE_INITED,
     25	HNS3_NIC_STATE_SERVICE_SCHED,
     26	HNS3_NIC_STATE2_RESET_REQUESTED,
     27	HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
     28	HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
     29	HNS3_NIC_STATE_TX_PUSH_ENABLE,
     30	HNS3_NIC_STATE_MAX
     31};
     32
     33#define HNS3_MAX_PUSH_BD_NUM		2
     34
     35#define HNS3_RING_RX_RING_BASEADDR_L_REG	0x00000
     36#define HNS3_RING_RX_RING_BASEADDR_H_REG	0x00004
     37#define HNS3_RING_RX_RING_BD_NUM_REG		0x00008
     38#define HNS3_RING_RX_RING_BD_LEN_REG		0x0000C
     39#define HNS3_RING_RX_RING_TAIL_REG		0x00018
     40#define HNS3_RING_RX_RING_HEAD_REG		0x0001C
     41#define HNS3_RING_RX_RING_FBDNUM_REG		0x00020
     42#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C
     43
     44#define HNS3_RING_TX_RING_BASEADDR_L_REG	0x00040
     45#define HNS3_RING_TX_RING_BASEADDR_H_REG	0x00044
     46#define HNS3_RING_TX_RING_BD_NUM_REG		0x00048
     47#define HNS3_RING_TX_RING_TC_REG		0x00050
     48#define HNS3_RING_TX_RING_TAIL_REG		0x00058
     49#define HNS3_RING_TX_RING_HEAD_REG		0x0005C
     50#define HNS3_RING_TX_RING_FBDNUM_REG		0x00060
     51#define HNS3_RING_TX_RING_OFFSET_REG		0x00064
     52#define HNS3_RING_TX_RING_EBDNUM_REG		0x00068
     53#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
     54#define HNS3_RING_TX_RING_EBD_OFFSET_REG	0x00070
     55#define HNS3_RING_TX_RING_BD_ERR_REG		0x00074
     56#define HNS3_RING_EN_REG			0x00090
     57#define HNS3_RING_RX_EN_REG			0x00098
     58#define HNS3_RING_TX_EN_REG			0x000D4
     59
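/* Editorial sketch, not part of the upstream header: the offsets above are
 * relative to each queue's own register window (the io_base of its
 * hnae3_queue).  A hypothetical doorbell helper built on them could look
 * like this:
 */
static inline void hns3_example_tx_doorbell(void __iomem *tqp_io_base,
					    int pending)
{
	/* tell hardware how many freshly filled TX BDs are ready */
	writel(pending, tqp_io_base + HNS3_RING_TX_RING_TAIL_REG);
}
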
     60#define HNS3_RX_HEAD_SIZE			256
     61
     62#define HNS3_TX_TIMEOUT (5 * HZ)
     63#define HNS3_RING_NAME_LEN			16
     64#define HNS3_BUFFER_SIZE_2048			2048
     65#define HNS3_RING_MAX_PENDING			32760
     66#define HNS3_RING_MIN_PENDING			72
     67#define HNS3_RING_BD_MULTIPLE			8
     68/* max frame size of mac */
     69#define HNS3_MAX_MTU(max_frm_size) \
     70	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
     71
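/* Worked example (editorial note): with ETH_HLEN = 14, ETH_FCS_LEN = 4 and
 * VLAN_HLEN = 4, HNS3_MAX_MTU(9728) = 9728 - (14 + 4 + 2 * 4) = 9702, i.e.
 * the largest MTU usable on a MAC that accepts 9728-byte frames.
 */
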
     72#define HNS3_BD_SIZE_512_TYPE			0
     73#define HNS3_BD_SIZE_1024_TYPE			1
     74#define HNS3_BD_SIZE_2048_TYPE			2
     75#define HNS3_BD_SIZE_4096_TYPE			3
     76
     77#define HNS3_RX_FLAG_VLAN_PRESENT		0x1
     78#define HNS3_RX_FLAG_L3ID_IPV4			0x0
     79#define HNS3_RX_FLAG_L3ID_IPV6			0x1
     80#define HNS3_RX_FLAG_L4ID_UDP			0x0
     81#define HNS3_RX_FLAG_L4ID_TCP			0x1
     82
     83#define HNS3_RXD_DMAC_S				0
     84#define HNS3_RXD_DMAC_M				(0x3 << HNS3_RXD_DMAC_S)
     85#define HNS3_RXD_VLAN_S				2
     86#define HNS3_RXD_VLAN_M				(0x3 << HNS3_RXD_VLAN_S)
     87#define HNS3_RXD_L3ID_S				4
     88#define HNS3_RXD_L3ID_M				(0xf << HNS3_RXD_L3ID_S)
     89#define HNS3_RXD_L4ID_S				8
     90#define HNS3_RXD_L4ID_M				(0xf << HNS3_RXD_L4ID_S)
     91#define HNS3_RXD_FRAG_B				12
     92#define HNS3_RXD_STRP_TAGP_S			13
     93#define HNS3_RXD_STRP_TAGP_M			(0x3 << HNS3_RXD_STRP_TAGP_S)
     94
     95#define HNS3_RXD_L2E_B				16
     96#define HNS3_RXD_L3E_B				17
     97#define HNS3_RXD_L4E_B				18
     98#define HNS3_RXD_TRUNCAT_B			19
     99#define HNS3_RXD_HOI_B				20
    100#define HNS3_RXD_DOI_B				21
    101#define HNS3_RXD_OL3E_B				22
    102#define HNS3_RXD_OL4E_B				23
    103#define HNS3_RXD_GRO_COUNT_S			24
    104#define HNS3_RXD_GRO_COUNT_M			(0x3f << HNS3_RXD_GRO_COUNT_S)
    105#define HNS3_RXD_GRO_FIXID_B			30
    106#define HNS3_RXD_GRO_ECN_B			31
    107
    108#define HNS3_RXD_ODMAC_S			0
    109#define HNS3_RXD_ODMAC_M			(0x3 << HNS3_RXD_ODMAC_S)
    110#define HNS3_RXD_OVLAN_S			2
    111#define HNS3_RXD_OVLAN_M			(0x3 << HNS3_RXD_OVLAN_S)
    112#define HNS3_RXD_OL3ID_S			4
    113#define HNS3_RXD_OL3ID_M			(0xf << HNS3_RXD_OL3ID_S)
    114#define HNS3_RXD_OL4ID_S			8
    115#define HNS3_RXD_OL4ID_M			(0xf << HNS3_RXD_OL4ID_S)
    116#define HNS3_RXD_FBHI_S				12
    117#define HNS3_RXD_FBHI_M				(0x3 << HNS3_RXD_FBHI_S)
    118#define HNS3_RXD_FBLI_S				14
    119#define HNS3_RXD_FBLI_M				(0x3 << HNS3_RXD_FBLI_S)
    120
    121#define HNS3_RXD_PTYPE_S			4
    122#define HNS3_RXD_PTYPE_M			GENMASK(11, 4)
    123
    124#define HNS3_RXD_BDTYPE_S			0
    125#define HNS3_RXD_BDTYPE_M			(0xf << HNS3_RXD_BDTYPE_S)
    126#define HNS3_RXD_VLD_B				4
    127#define HNS3_RXD_UDP0_B				5
    128#define HNS3_RXD_EXTEND_B			7
    129#define HNS3_RXD_FE_B				8
    130#define HNS3_RXD_LUM_B				9
    131#define HNS3_RXD_CRCP_B				10
    132#define HNS3_RXD_L3L4P_B			11
    133#define HNS3_RXD_TSIDX_S			12
    134#define HNS3_RXD_TSIDX_M			(0x3 << HNS3_RXD_TSIDX_S)
    135#define HNS3_RXD_TS_VLD_B			14
    136#define HNS3_RXD_LKBK_B				15
    137#define HNS3_RXD_GRO_SIZE_S			16
    138#define HNS3_RXD_GRO_SIZE_M			(0x3fff << HNS3_RXD_GRO_SIZE_S)
    139
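/* Editorial sketch, not upstream code: the RX descriptor words are little
 * endian, so the fields defined above are read by converting to CPU order
 * and masking/shifting.  The helper below is hypothetical; hns3_enet.c
 * performs this style of extraction when parsing RX completions.
 */
static inline u32 hns3_example_rxd_l3id(__le32 l234_info)
{
	return (le32_to_cpu(l234_info) & HNS3_RXD_L3ID_M) >> HNS3_RXD_L3ID_S;
}
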
    140#define HNS3_TXD_L3T_S				0
    141#define HNS3_TXD_L3T_M				(0x3 << HNS3_TXD_L3T_S)
    142#define HNS3_TXD_L4T_S				2
    143#define HNS3_TXD_L4T_M				(0x3 << HNS3_TXD_L4T_S)
    144#define HNS3_TXD_L3CS_B				4
    145#define HNS3_TXD_L4CS_B				5
    146#define HNS3_TXD_VLAN_B				6
    147#define HNS3_TXD_TSO_B				7
    148
    149#define HNS3_TXD_L2LEN_S			8
    150#define HNS3_TXD_L2LEN_M			(0xff << HNS3_TXD_L2LEN_S)
    151#define HNS3_TXD_L3LEN_S			16
    152#define HNS3_TXD_L3LEN_M			(0xff << HNS3_TXD_L3LEN_S)
    153#define HNS3_TXD_L4LEN_S			24
    154#define HNS3_TXD_L4LEN_M			(0xff << HNS3_TXD_L4LEN_S)
    155
    156#define HNS3_TXD_CSUM_START_S		8
    157#define HNS3_TXD_CSUM_START_M		(0xffff << HNS3_TXD_CSUM_START_S)
    158
    159#define HNS3_TXD_OL3T_S				0
    160#define HNS3_TXD_OL3T_M				(0x3 << HNS3_TXD_OL3T_S)
    161#define HNS3_TXD_OVLAN_B			2
    162#define HNS3_TXD_MACSEC_B			3
    163#define HNS3_TXD_TUNTYPE_S			4
    164#define HNS3_TXD_TUNTYPE_M			(0xf << HNS3_TXD_TUNTYPE_S)
    165
    166#define HNS3_TXD_CSUM_OFFSET_S		8
    167#define HNS3_TXD_CSUM_OFFSET_M		(0xffff << HNS3_TXD_CSUM_OFFSET_S)
    168
    169#define HNS3_TXD_BDTYPE_S			0
    170#define HNS3_TXD_BDTYPE_M			(0xf << HNS3_TXD_BDTYPE_S)
    171#define HNS3_TXD_FE_B				4
    172#define HNS3_TXD_SC_S				5
    173#define HNS3_TXD_SC_M				(0x3 << HNS3_TXD_SC_S)
    174#define HNS3_TXD_EXTEND_B			7
    175#define HNS3_TXD_VLD_B				8
    176#define HNS3_TXD_RI_B				9
    177#define HNS3_TXD_RA_B				10
    178#define HNS3_TXD_TSYN_B				11
    179#define HNS3_TXD_DECTTL_S			12
    180#define HNS3_TXD_DECTTL_M			(0xf << HNS3_TXD_DECTTL_S)
    181
    182#define HNS3_TXD_OL4CS_B			22
    183
    184#define HNS3_TXD_MSS_S				0
    185#define HNS3_TXD_MSS_M				(0x3fff << HNS3_TXD_MSS_S)
    186#define HNS3_TXD_HW_CS_B			14
    187
    188#define HNS3_VECTOR_TX_IRQ			BIT_ULL(0)
    189#define HNS3_VECTOR_RX_IRQ			BIT_ULL(1)
    190
    191#define HNS3_VECTOR_NOT_INITED			0
    192#define HNS3_VECTOR_INITED			1
    193
    194#define HNS3_MAX_BD_SIZE			65535
    195#define HNS3_MAX_TSO_BD_NUM			63U
    196#define HNS3_MAX_TSO_SIZE			1048576U
    197#define HNS3_MAX_NON_TSO_SIZE			9728U
    198
    199#define HNS3_VECTOR_GL_MASK			GENMASK(11, 0)
    200#define HNS3_VECTOR_GL0_OFFSET			0x100
    201#define HNS3_VECTOR_GL1_OFFSET			0x200
    202#define HNS3_VECTOR_GL2_OFFSET			0x300
    203#define HNS3_VECTOR_RL_OFFSET			0x900
    204#define HNS3_VECTOR_RL_EN_B			6
    205#define HNS3_VECTOR_QL_MASK			GENMASK(9, 0)
    206#define HNS3_VECTOR_TX_QL_OFFSET		0xe00
    207#define HNS3_VECTOR_RX_QL_OFFSET		0xf00
    208
    209#define HNS3_RING_EN_B				0
    210
    211#define HNS3_GL0_CQ_MODE_REG			0x20d00
    212#define HNS3_GL1_CQ_MODE_REG			0x20d04
    213#define HNS3_GL2_CQ_MODE_REG			0x20d08
    214#define HNS3_CQ_MODE_EQE			1U
    215#define HNS3_CQ_MODE_CQE			0U
    216
    217enum hns3_pkt_l2t_type {
    218	HNS3_L2_TYPE_UNICAST,
    219	HNS3_L2_TYPE_MULTICAST,
    220	HNS3_L2_TYPE_BROADCAST,
    221	HNS3_L2_TYPE_INVALID,
    222};
    223
    224enum hns3_pkt_l3t_type {
    225	HNS3_L3T_NONE,
     226	HNS3_L3T_IPV4,
     227	HNS3_L3T_IPV6,
    228	HNS3_L3T_RESERVED
    229};
    230
    231enum hns3_pkt_l4t_type {
    232	HNS3_L4T_UNKNOWN,
    233	HNS3_L4T_TCP,
    234	HNS3_L4T_UDP,
    235	HNS3_L4T_SCTP
    236};
    237
    238enum hns3_pkt_ol3t_type {
    239	HNS3_OL3T_NONE,
    240	HNS3_OL3T_IPV6,
    241	HNS3_OL3T_IPV4_NO_CSUM,
    242	HNS3_OL3T_IPV4_CSUM
    243};
    244
    245enum hns3_pkt_tun_type {
    246	HNS3_TUN_NONE,
    247	HNS3_TUN_MAC_IN_UDP,
    248	HNS3_TUN_NVGRE,
    249	HNS3_TUN_OTHER
    250};
    251
    252/* hardware spec ring buffer format */
    253struct __packed hns3_desc {
    254	union {
    255		__le64 addr;
    256		__le16 csum;
    257		struct {
    258			__le32 ts_nsec;
    259			__le32 ts_sec;
    260		};
    261	};
    262	union {
    263		struct {
    264			__le16 vlan_tag;
    265			__le16 send_size;
    266			union {
    267				__le32 type_cs_vlan_tso_len;
    268				struct {
    269					__u8 type_cs_vlan_tso;
    270					__u8 l2_len;
    271					__u8 l3_len;
    272					__u8 l4_len;
    273				};
    274			};
    275			__le16 outer_vlan_tag;
    276			__le16 tv;
    277
     278			union {
     279				__le32 ol_type_vlan_len_msec;
     280				struct {
     281					__u8 ol_type_vlan_msec;
     282					__u8 ol2_len;
     283					__u8 ol3_len;
     284					__u8 ol4_len;
     285				};
     286			};
    287
    288			__le32 paylen_ol4cs;
    289			__le16 bdtp_fe_sc_vld_ra_ri;
    290			__le16 mss_hw_csum;
    291		} tx;
    292
    293		struct {
    294			__le32 l234_info;
    295			__le16 pkt_len;
    296			__le16 size;
    297
    298			__le32 rss_hash;
    299			__le16 fd_id;
    300			__le16 vlan_tag;
    301
    302			union {
    303				__le32 ol_info;
    304				struct {
    305					__le16 o_dm_vlan_id_fb;
    306					__le16 ot_vlan_tag;
    307				};
    308			};
    309
    310			__le32 bd_base_info;
    311		} rx;
    312	};
    313};
    314
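/* Editorial sketch, not upstream code: a minimal single-BD TX fill showing
 * how the TXD_* bit positions land in the little-endian descriptor words.
 * The helper itself is hypothetical; the field names come from the tx
 * variant of struct hns3_desc above.
 */
static inline void hns3_example_fill_tx_bd(struct hns3_desc *desc,
					   dma_addr_t dma, u16 size)
{
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16(size);
	/* one-BD packet: mark the BD valid and set the frame-end bit */
	desc->tx.bdtp_fe_sc_vld_ra_ri =
		cpu_to_le16(BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B));
}
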
    315enum hns3_desc_type {
    316	DESC_TYPE_UNKNOWN		= 0,
    317	DESC_TYPE_SKB			= 1 << 0,
    318	DESC_TYPE_FRAGLIST_SKB		= 1 << 1,
    319	DESC_TYPE_PAGE			= 1 << 2,
    320	DESC_TYPE_BOUNCE_ALL		= 1 << 3,
    321	DESC_TYPE_BOUNCE_HEAD		= 1 << 4,
    322	DESC_TYPE_SGL_SKB		= 1 << 5,
    323	DESC_TYPE_PP_FRAG		= 1 << 6,
    324};
    325
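/* Editorial note: the DESC_TYPE_* values above are bit flags, so a buffer
 * callback's type can be tested against several of them at once, e.g.
 * (hypothetical snippet):
 *
 *	if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
 *		dev_kfree_skb_any(desc_cb->priv);
 */
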
    326struct hns3_desc_cb {
    327	dma_addr_t dma; /* dma address of this desc */
    328	void *buf;      /* cpu addr for a desc */
    329
     330	/* priv data for the desc, e.g. skb when used with ip stack */
    331	void *priv;
    332
    333	union {
    334		u32 page_offset;	/* for rx */
    335		u32 send_bytes;		/* for tx */
    336	};
    337
    338	u32 length;     /* length of the buffer */
    339
    340	u16 reuse_flag;
    341	u16 refill;
    342
    343	/* desc type, used by the ring user to mark the type of the priv data */
    344	u16 type;
    345	u16 pagecnt_bias;
    346};
    347
    348enum hns3_pkt_l3type {
    349	HNS3_L3_TYPE_IPV4,
    350	HNS3_L3_TYPE_IPV6,
    351	HNS3_L3_TYPE_ARP,
    352	HNS3_L3_TYPE_RARP,
    353	HNS3_L3_TYPE_IPV4_OPT,
    354	HNS3_L3_TYPE_IPV6_EXT,
    355	HNS3_L3_TYPE_LLDP,
    356	HNS3_L3_TYPE_BPDU,
    357	HNS3_L3_TYPE_MAC_PAUSE,
    358	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
    359
    360	/* reserved for 0xA~0xB */
    361
    362	HNS3_L3_TYPE_CNM = 0xc,
    363
    364	/* reserved for 0xD~0xE */
    365
    366	HNS3_L3_TYPE_PARSE_FAIL	= 0xf /* must be last */
    367};
    368
    369enum hns3_pkt_l4type {
    370	HNS3_L4_TYPE_UDP,
    371	HNS3_L4_TYPE_TCP,
    372	HNS3_L4_TYPE_GRE,
    373	HNS3_L4_TYPE_SCTP,
    374	HNS3_L4_TYPE_IGMP,
    375	HNS3_L4_TYPE_ICMP,
    376
    377	/* reserved for 0x6~0xE */
    378
    379	HNS3_L4_TYPE_PARSE_FAIL	= 0xf /* must be last */
    380};
    381
    382enum hns3_pkt_ol3type {
    383	HNS3_OL3_TYPE_IPV4 = 0,
    384	HNS3_OL3_TYPE_IPV6,
    385	/* reserved for 0x2~0x3 */
    386	HNS3_OL3_TYPE_IPV4_OPT = 4,
    387	HNS3_OL3_TYPE_IPV6_EXT,
    388
    389	/* reserved for 0x6~0xE */
    390
    391	HNS3_OL3_TYPE_PARSE_FAIL = 0xf	/* must be last */
    392};
    393
    394enum hns3_pkt_ol4type {
    395	HNS3_OL4_TYPE_NO_TUN,
    396	HNS3_OL4_TYPE_MAC_IN_UDP,
    397	HNS3_OL4_TYPE_NVGRE,
    398	HNS3_OL4_TYPE_UNKNOWN
    399};
    400
    401struct hns3_rx_ptype {
    402	u32 ptype : 8;
    403	u32 csum_level : 2;
    404	u32 ip_summed : 2;
    405	u32 l3_type : 4;
    406	u32 valid : 1;
    407};
    408
    409struct ring_stats {
    410	u64 sw_err_cnt;
    411	u64 seg_pkt_cnt;
    412	union {
    413		struct {
    414			u64 tx_pkts;
    415			u64 tx_bytes;
    416			u64 tx_more;
    417			u64 tx_push;
    418			u64 tx_mem_doorbell;
    419			u64 restart_queue;
    420			u64 tx_busy;
    421			u64 tx_copy;
    422			u64 tx_vlan_err;
    423			u64 tx_l4_proto_err;
    424			u64 tx_l2l3l4_err;
    425			u64 tx_tso_err;
    426			u64 over_max_recursion;
    427			u64 hw_limitation;
    428			u64 tx_bounce;
    429			u64 tx_spare_full;
    430			u64 copy_bits_err;
    431			u64 tx_sgl;
    432			u64 skb2sgl_err;
    433			u64 map_sg_err;
    434		};
    435		struct {
    436			u64 rx_pkts;
    437			u64 rx_bytes;
    438			u64 rx_err_cnt;
    439			u64 reuse_pg_cnt;
    440			u64 err_pkt_len;
    441			u64 err_bd_num;
    442			u64 l2_err;
    443			u64 l3l4_csum_err;
    444			u64 csum_complete;
    445			u64 rx_multicast;
    446			u64 non_reuse_pg;
    447			u64 frag_alloc_err;
    448			u64 frag_alloc;
    449		};
    451	};
    452};
    453
    454struct hns3_tx_spare {
    455	dma_addr_t dma;
    456	void *buf;
    457	u32 next_to_use;
    458	u32 next_to_clean;
    459	u32 last_to_clean;
    460	u32 len;
    461};
    462
    463struct hns3_enet_ring {
    464	struct hns3_desc *desc; /* dma map address space */
    465	struct hns3_desc_cb *desc_cb;
    466	struct hns3_enet_ring *next;
    467	struct hns3_enet_tqp_vector *tqp_vector;
    468	struct hnae3_queue *tqp;
    469	int queue_index;
    470	struct device *dev; /* will be used for DMA mapping of descriptors */
    471	struct page_pool *page_pool;
    472
    473	/* statistic */
    474	struct ring_stats stats;
    475	struct u64_stats_sync syncp;
    476
    477	dma_addr_t desc_dma_addr;
    478	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
    479	u16 desc_num;       /* total number of desc */
    480	int next_to_use;    /* idx of next spare desc */
    481
     482	/* idx of latest sent desc, the ring is empty when equal to
    483	 * next_to_use
    484	 */
    485	int next_to_clean;
    486	u32 flag;          /* ring attribute */
    487
    488	int pending_buf;
    489	union {
    490		/* for Tx ring */
    491		struct {
    492			u32 fd_qb_tx_sample;
    493			int last_to_use;        /* last idx used by xmit */
    494			u32 tx_copybreak;
    495			struct hns3_tx_spare *tx_spare;
    496		};
    497
    498		/* for Rx ring */
    499		struct {
    500			u32 pull_len;   /* memcpy len for current rx packet */
    501			u32 rx_copybreak;
    502			u32 frag_num;
    503			/* first buffer address for current packet */
    504			unsigned char *va;
    505			struct sk_buff *skb;
    506			struct sk_buff *tail_skb;
    507		};
    508	};
    509} ____cacheline_internodealigned_in_smp;
    510
    511enum hns3_flow_level_range {
    512	HNS3_FLOW_LOW = 0,
    513	HNS3_FLOW_MID = 1,
    514	HNS3_FLOW_HIGH = 2,
    515	HNS3_FLOW_ULTRA = 3,
    516};
    517
    518#define HNS3_INT_GL_50K			0x0014
    519#define HNS3_INT_GL_20K			0x0032
    520#define HNS3_INT_GL_18K			0x0036
    521#define HNS3_INT_GL_8K			0x007C
    522
    523#define HNS3_INT_GL_1US			BIT(31)
    524
    525#define HNS3_INT_RL_MAX			0x00EC
    526#define HNS3_INT_RL_ENABLE_MASK		0x40
    527
    528#define HNS3_INT_QL_DEFAULT_CFG		0x20
    529
    530struct hns3_enet_coalesce {
    531	u16 int_gl;
    532	u16 int_ql;
    533	u16 int_ql_max;
    534	u8 adapt_enable : 1;
    535	u8 ql_enable : 1;
    536	u8 unit_1us : 1;
    537	enum hns3_flow_level_range flow_level;
    538};
    539
    540struct hns3_enet_ring_group {
    541	/* array of pointers to rings */
    542	struct hns3_enet_ring *ring;
    543	u64 total_bytes;	/* total bytes processed this group */
    544	u64 total_packets;	/* total packets processed this group */
    545	u16 count;
    546	struct hns3_enet_coalesce coal;
    547	struct dim dim;
    548};
    549
    550struct hns3_enet_tqp_vector {
    551	struct hnae3_handle *handle;
    552	u8 __iomem *mask_addr;
    553	int vector_irq;
    554	int irq_init_flag;
    555
    556	u16 idx;		/* index in the TQP vector array per handle. */
    557
    558	struct napi_struct napi;
    559
    560	struct hns3_enet_ring_group rx_group;
    561	struct hns3_enet_ring_group tx_group;
    562
    563	cpumask_t affinity_mask;
    564	u16 num_tqps;	/* total number of tqps in TQP vector */
    565	struct irq_affinity_notify affinity_notify;
    566
    567	char name[HNAE3_INT_NAME_LEN];
    568
    569	u64 event_cnt;
    570} ____cacheline_internodealigned_in_smp;
    571
    572struct hns3_nic_priv {
    573	struct hnae3_handle *ae_handle;
    574	struct net_device *netdev;
    575	struct device *dev;
    576
    577	/**
     578	 * the cb for nic to manage the ring buffer; the first half of the
     579	 * array is for tx_ring and the second half is for rx_ring
    580	 */
    581	struct hns3_enet_ring *ring;
    582	struct hns3_enet_tqp_vector *tqp_vector;
    583	u16 vector_num;
    584	u8 max_non_tso_bd_num;
    585
    586	u64 tx_timeout_count;
    587
    588	unsigned long state;
    589
    590	enum dim_cq_period_mode tx_cqe_mode;
    591	enum dim_cq_period_mode rx_cqe_mode;
    592	struct hns3_enet_coalesce tx_coal;
    593	struct hns3_enet_coalesce rx_coal;
    594	u32 tx_copybreak;
    595	u32 rx_copybreak;
    596};
    597
    598union l3_hdr_info {
    599	struct iphdr *v4;
    600	struct ipv6hdr *v6;
    601	unsigned char *hdr;
    602};
    603
    604union l4_hdr_info {
    605	struct tcphdr *tcp;
    606	struct udphdr *udp;
    607	struct gre_base_hdr *gre;
    608	unsigned char *hdr;
    609};
    610
    611struct hns3_hw_error_info {
    612	enum hnae3_hw_error_type type;
    613	const char *msg;
    614};
    615
    616struct hns3_reset_type_map {
    617	enum ethtool_reset_flags rst_flags;
    618	enum hnae3_reset_type rst_type;
    619};
    620
    621static inline int ring_space(struct hns3_enet_ring *ring)
    622{
    623	/* This smp_load_acquire() pairs with smp_store_release() in
    624	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
    625	 */
    626	int begin = smp_load_acquire(&ring->next_to_clean);
    627	int end = READ_ONCE(ring->next_to_use);
    628
    629	return ((end >= begin) ? (ring->desc_num - end + begin) :
    630			(begin - end)) - 1;
    631}
    632
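/* Worked example (editorial note): with desc_num = 512, next_to_clean = 5
 * and next_to_use = 10, ring_space() returns (512 - 10 + 5) - 1 = 506.  One
 * slot is always kept unused so that next_to_use == next_to_clean can only
 * mean "ring empty", never "ring full".
 */
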
    633static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
    634{
    635	return readl_relaxed(ring->tqp->io_base + reg);
    636}
    637
    638static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
    639{
    640	return readl(base + reg);
    641}
    642
    643static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
    644{
    645	u8 __iomem *reg_addr = READ_ONCE(base);
    646
    647	writel(value, reg_addr + reg);
    648}
    649
    650#define hns3_read_dev(a, reg) \
    651	hns3_read_reg((a)->io_base, reg)
    652
    653static inline bool hns3_nic_resetting(struct net_device *netdev)
    654{
    655	struct hns3_nic_priv *priv = netdev_priv(netdev);
    656
    657	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
    658}
    659
    660#define hns3_write_dev(a, reg, value) \
    661	hns3_write_reg((a)->io_base, reg, value)
    662
    663#define ring_to_dev(ring) ((ring)->dev)
    664
    665#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)
    666
    667#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
    668	DMA_TO_DEVICE : DMA_FROM_DEVICE)
    669
    670#define hns3_buf_size(_ring) ((_ring)->buf_size)
    671
    672#define hns3_ring_stats_update(ring, cnt) do { \
    673	typeof(ring) (tmp) = (ring); \
    674	u64_stats_update_begin(&(tmp)->syncp); \
    675	((tmp)->stats.cnt)++; \
    676	u64_stats_update_end(&(tmp)->syncp); \
    677} while (0) \
    678
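/* Illustrative usage (editorial note): the macro above bumps one named
 * counter under the ring's u64_stats seqcount, e.g.
 *
 *	hns3_ring_stats_update(ring, tx_busy);
 *
 * which expands to a u64_stats_update_begin()/end() pair around
 * ring->stats.tx_busy++.
 */
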
    679static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
    680{
    681#if (PAGE_SIZE < 8192)
    682	if (ring->buf_size > (PAGE_SIZE / 2))
    683		return 1;
    684#endif
    685	return 0;
    686}
    687
    688#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
    689
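/* Worked example (editorial note): on a 4K-page system a 2048-byte RX
 * buffer gives hns3_page_order() == 0 and hns3_page_size() == 4096, while a
 * 4096-byte buffer gives order 1 and an 8192-byte allocation, so either way
 * two RX buffers fit in one allocation.
 */
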
    690/* iterator for handling rings in ring group */
    691#define hns3_for_each_ring(pos, head) \
    692	for (pos = (head).ring; (pos); pos = (pos)->next)
    693
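/* Illustrative usage (editorial note): walk every ring chained into a
 * group, e.g. when a vector's poll routine cleans its TX rings:
 *
 *	struct hns3_enet_ring *ring;
 *
 *	hns3_for_each_ring(ring, tqp_vector->tx_group)
 *		hns3_clean_tx_ring(ring, budget);
 */
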
    694#define hns3_get_handle(ndev) \
    695	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
    696
    697#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
    698#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
    699
    700#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
    701#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
    702
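/* Worked example (editorial note): the GL register counts in 2 us units and
 * the RL register in 4 us units, so hns3_gl_usec_to_reg(40) == 20,
 * hns3_rl_usec_to_reg(100) == 25, and hns3_gl_round_down(41) == 40.
 */
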
    703void hns3_ethtool_set_ops(struct net_device *netdev);
    704int hns3_set_channels(struct net_device *netdev,
    705		      struct ethtool_channels *ch);
    706
    707void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
    708int hns3_init_all_ring(struct hns3_nic_priv *priv);
    709int hns3_nic_reset_all_ring(struct hnae3_handle *h);
    710void hns3_fini_ring(struct hns3_enet_ring *ring);
    711netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
    712bool hns3_is_phys_func(struct pci_dev *pdev);
    713int hns3_clean_rx_ring(
    714		struct hns3_enet_ring *ring, int budget,
    715		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
    716
    717void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
    718				    u32 gl_value);
    719void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
    720				    u32 gl_value);
    721void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
    722				 u32 rl_value);
    723void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
    724				    u32 ql_value);
    725void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
    726				    u32 ql_value);
    727
    728void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
    729int hns3_reset_notify(struct hnae3_handle *handle,
    730		      enum hnae3_reset_notify_type type);
    731
    732#ifdef CONFIG_HNS3_DCB
    733void hns3_dcbnl_setup(struct hnae3_handle *handle);
    734#else
    735static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
    736#endif
    737
    738int hns3_dbg_init(struct hnae3_handle *handle);
    739void hns3_dbg_uninit(struct hnae3_handle *handle);
    740void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
    741void hns3_dbg_unregister_debugfs(void);
    742void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
    743u16 hns3_get_max_available_channels(struct hnae3_handle *h);
    744void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
    745			      enum dim_cq_period_mode tx_mode,
    746			      enum dim_cq_period_mode rx_mode);
    747#endif