cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixgbe.h (32599B)


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_ipsec.h"

#include <net/xdp.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		    512
#define IXGBE_DEFAULT_TX_WORK		    256
#define IXGBE_MAX_TXD			   4096
#define IXGBE_MIN_TXD			     64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		    512
#else
#define IXGBE_DEFAULT_RXD		    128
#endif
#define IXGBE_MAX_RXD			   4096
#define IXGBE_MIN_RXD			     64

/* flow control */
#define IXGBE_MIN_FCRTL			   0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			  0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
#define IXGBE_MIN_FCPAUSE		      0
#define IXGBE_MAX_FCPAUSE		 0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
#define IXGBE_RXBUFFER_1536  1536
#define IXGBE_RXBUFFER_2K    2048
#define IXGBE_RXBUFFER_3K    3072
#define IXGBE_RXBUFFER_4K    4096
#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536 (1534 once NET_IP_ALIGN is
 * deducted) bytes to store the data for the frame.  This leaves us with
 * 512 bytes of room.  From that we need to deduct the space needed for
 * the shared info and the padding needed to IP align the frame.
 *
 * Note: For cache line sizes of 256 bytes or larger this value is going
 *	 to end up negative.  In these cases we should fall back to the
 *	 3K buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))

static inline int ixgbe_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IXGBE_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ixgbe_compute_pad(rx_buf_len);
}

#define IXGBE_SKB_PAD	ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif
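
/* Editor's note (illustrative arithmetic, not part of the upstream
 * source): on a 4K-page system with 64-byte cache lines, where
 * NET_SKB_PAD = max(32, L1_CACHE_BYTES) = 64 and the aligned shared
 * info is the 320 bytes cited in the NOTE below, the 2K path works
 * out as
 *
 *   SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) = 2048 - 320 = 1728
 *   NET_SKB_PAD + IXGBE_RXBUFFER_1536    =   64 + 1536 = 1600
 *
 * so IXGBE_2K_TOO_SMALL_WITH_PADDING is false and the 2K buffer is
 * kept. With 256-byte cache lines both NET_SKB_PAD and the aligned
 * shared info grow, the comparison flips, and ixgbe_skb_pad() sizes
 * the padding against the 3K buffer instead.
 */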

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds another 320 bytes, which
 * adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment, we can use a
 * value of 256 and the resulting skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
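
/* Editor's note (worked example, assuming sizeof(struct sk_buff) rounds
 * up to 256 bytes after cache-line alignment): the 448 bytes of overhead
 * from the note above plus the 256-byte header buffer give
 *
 *   256 (header) + 448 (overhead) = 704 bytes
 *   704 + 256 (struct sk_buff itself) = 960 bytes,
 *
 * which is where the "truesize of 960 or less" figure comes from.
 */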

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,
	IXGBE_TX_FLAGS_IPSEC	= 0x40,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
	IXGBE_TX_FLAGS_FCOE	= 0x100,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define IXGBE_MAX_VF_MC_ENTRIES         30
#define IXGBE_MAX_VF_FUNCTIONS          64
#define IXGBE_MAX_VFTA_ENTRIES          128
#define MAX_EMULATION_MAC_ADDRS         16
#define IXGBE_MAX_PF_MACVLANS           15
#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID        0x10ED
#define IXGBE_X540_VF_DEVICE_ID         0x1515

struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	int link_enable;
	int link_state;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
	u8 primary_abort_count;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
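
/* Editor's note (worked example): IXGBE_MAX_DATA_PER_TXD is 1 << 14 =
 * 16384 bytes, so a 32KB chunk needs TXD_USE_COUNT(32768) =
 * DIV_ROUND_UP(32768, 16384) = 2 descriptors. DESC_NEEDED budgets the
 * worst case of one descriptor per fragment (MAX_SKB_FRAGS, typically
 * 17 on 4K-page builds) plus slack for the head and context descriptor.
 */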

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	union {
		struct {
			struct sk_buff *skb;
			dma_addr_t dma;
			struct page *page;
			__u32 page_offset;
			__u16 pagecnt_bias;
		};
		struct {
			bool discard;
			struct xdp_buff *xdp;
		};
	};
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,
	__IXGBE_TX_DISABLED,
};

#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* number of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t tx_lock;	/* used in XDP mode */
	struct xsk_buff_pool *xsk_pool;
	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
	u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE      /* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_XDP_QS		(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		63

DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this exceeds half of a 4K page, we need to double the page order for
 * FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
	if (ring_uses_build_skb(ring))
		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
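
/* Editor's note (illustrative): with 4K pages a half page is 2048 bytes,
 * too small for the >2200-byte FCoE buffer, so ixgbe_rx_pg_order()
 * returns 1 and ixgbe_rx_pg_size() yields an order-1 (8K) page that can
 * be carved into two 3K buffers with room to spare. With PAGE_SIZE >=
 * 8192 an order-0 page already gives enough room.
 */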

#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
			      ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * Microsecond values for various ITR rates, shifted left by 2 to fit the
 * ITR register, whose lowest 3 bits are reserved as 0.
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
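
/* Editor's note (worked conversion): the stored value is the interval in
 * microseconds shifted left by 2, so
 *
 *   IXGBE_100K_ITR =  40 ->  40 >> 2 = 10 us -> ~100K interrupts/s
 *   IXGBE_20K_ITR  = 200 -> 200 >> 2 = 50 us ->  ~20K interrupts/s
 *   IXGBE_12K_ITR  = 336 -> 336 >> 2 = 84 us ->  ~12K interrupts/s
 */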

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
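
/* Editor's note (worked example): with count = 512, next_to_clean = 10
 * and next_to_use = 500, ntc <= ntu, so the result is
 * 512 + 10 - 500 - 1 = 21 unused descriptors; the -1 keeps one slot
 * permanently empty so a full ring can be told apart from an empty one.
 */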

#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */

#define IXGBE_PRIMARY_ABORT_LIMIT	5

/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF		BIT(19)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;
	u64 tx_ipsec;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u64 rx_ipsec;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 hw_tcs;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	char eeprom_id[NVM_VER_SIZE];
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /* CONFIG_DEBUG_FS */

	u8 default_up;
	/* Bitmask indicating in use pools */
	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

/* maximum number of RETA entries among all devices supported by the
 * ixgbe driver: currently the X550 in non-SR-IOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_IXGBE_IPSEC
	struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
};

static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
	if (static_key_enabled(&ixgbe_xdp_locking_key))
		return cpu % IXGBE_MAX_XDP_QS;
	else
		return cpu;
}
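
/* Editor's note: when there are more CPUs than IXGBE_MAX_XDP_QS, the
 * ixgbe_xdp_locking_key static branch is enabled and several CPUs share
 * one XDP ring via the modulo above, serialized by the per-ring tx_lock;
 * otherwise each CPU owns the ring matching its own id.
 */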

static inline
struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
{
	int index = ixgbe_determine_xdp_q_idx(smp_processor_id());

	return adapter->xdp_ring[index];
}

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}

struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u64 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
	__IXGBE_RESET_REQUESTED,
};

struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)

enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
				 struct ixgbe_tx_buffer *first,
				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
					u32 vf) { }
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */

static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
{
	return !!adapter->xdp_prog;
}

#endif /* _IXGBE_H_ */