cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i40e_txrx.h (18003B)


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160usec, which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value which is divided by 2 let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_20K		    50
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
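
/* Worked example (illustrative sketch, not part of the upstream driver):
 * the Rx default above is 50 usec (20K interrupts/sec) with the dynamic
 * flag set, i.e. 0x8032.  ITR_TO_REG() strips the flag back to the 50 usec
 * user value and ITR_IS_DYNAMIC() reports true; with the 2 usec register
 * resolution noted above, 50 usec corresponds to a raw register value of 25.
 */
static inline bool i40e_itr_default_example(void)
{
	u16 setting = I40E_ITR_RX_DEF;		/* 0x8032 */

	return ITR_IS_DYNAMIC(setting) &&	/* dynamic flag is set */
	       ITR_TO_REG(setting) == I40E_ITR_20K;	/* 50 usec user value */
}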

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
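
/* Usage sketch (illustrative only): with the 4 usec register resolution, a
 * 20 usec rate limit encodes as (20 >> 2) | INTRL_ENA = 0x45, and
 * INTRL_REG_TO_USEC() recovers the original 20 usec.
 */
static inline bool i40e_intrl_roundtrip_example(void)
{
	u16 reg = i40e_intrl_usec_to_reg(20);	/* 0x45 */

	return INTRL_REG_TO_USEC(reg) == 20;
}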

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
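
/* Worked example (illustrative sketch; exact numbers depend on the kernel
 * configuration): with 4K pages and the 384-byte skb_shared_info mentioned
 * earlier, a 1536-byte buffer rounds up to half a page (2048), leaving
 * roughly 2048 - 384 - 1536 = 128 bytes of headroom before the NET_IP_ALIGN
 * adjustment applied by i40e_skb_pad().
 */
static inline int i40e_headroom_example(void)
{
	return i40e_compute_pad(I40E_RXBUFFER_1536);	/* ~128 on 4K pages */
}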
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
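
/* Usage sketch (illustrative only): the Rx clean-up path typically uses this
 * helper to check the descriptor-done (DD) status bit before touching a
 * descriptor; the DD bit definition is assumed to come from i40e_type.h.
 */
static inline bool i40e_rx_desc_done_example(union i40e_rx_desc *rx_desc)
{
	return i40e_test_staterr(rx_desc,
				 BIT(I40E_RX_DESC_STATUS_DD_SHIFT));
}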

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)


#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
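
/* Worked example (illustrative only): I40E_MAX_DATA_PER_TXD_ALIGNED above is
 * 16383 & ~4095 = 12288 (12K).  For a 60K GSO chunk the estimate is
 * ((61440 * 85) >> 20) + 1 = 4 + 1 = 5 descriptors, which matches the exact
 * ceil(61440 / 12288) = 5.
 */
static inline bool i40e_txd_use_count_example(void)
{
	return i40e_txd_use_count(61440) == 5;
}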

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
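
/* Worked example (illustrative only): with 4K pages, 2K receive buffers fit
 * two to a page (order 0), while 3K buffers no longer do, so
 * i40e_rx_pg_order() returns 1 and i40e_rx_pg_size() becomes 8192.
 */
static inline unsigned int i40e_rx_pg_size_example(void)
{
	struct i40e_ring ring = { .rx_buf_len = I40E_RXBUFFER_3072 };

	return i40e_rx_pg_size(&ring);	/* 8192 when PAGE_SIZE == 4096 */
}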

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
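
/* Layout sketch (illustrative only, assuming the ring was allocated with one
 * extra write-back slot after the last descriptor, as the Tx setup path
 * arranges): the hardware-reported head can be compared against next_to_use
 * to estimate how many descriptors are still in flight.
 */
static inline u32 i40e_tx_in_flight_example(struct i40e_ring *tx_ring)
{
	u32 head = i40e_get_head(tx_ring);

	/* descriptors handed to hardware but not yet completed */
	return tx_ring->next_to_use >= head ?
	       tx_ring->next_to_use - head :
	       tx_ring->next_to_use + tx_ring->count - head;
}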

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring, since we
 * need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
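
/* Usage sketch (illustrative only, loosely mirroring the transmit path and
 * not taken verbatim from it): estimate the descriptors a frame needs, then
 * reserve them plus a small margin for the context descriptor and the head
 * write-back cache line (the exact margin here is an assumption).  For
 * example, a 2K linear area plus two 4K fragments costs 1 + 1 + 1 = 3 data
 * descriptors by i40e_txd_use_count().
 */
static inline int i40e_xmit_reserve_example(struct i40e_ring *tx_ring,
					    struct sk_buff *skb)
{
	int count = i40e_xmit_descriptor_count(skb);

	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
		return -EBUSY;	/* ring too full, try again later */

	return count;
}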

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */