cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iavf_txrx.h (17429B)


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160 usec, which is then called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual usec values
 * and avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define IAVF_ITR_100K		    10	/* all values below must be even */
#define IAVF_ITR_50K		    20
#define IAVF_ITR_20K		    50
#define IAVF_ITR_18K		    60
#define IAVF_ITR_8K		   122
#define IAVF_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)

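/* Illustrative sketch, not part of the upstream header: how a stored ITR
 * setting (usecs plus an optional IAVF_ITR_DYNAMIC flag) would map to the
 * raw register interval, assuming the 2 usec register resolution noted
 * above. E.g. IAVF_ITR_TX_DEF -> ITR_TO_REG() == 50 usec -> interval 25.
 */
static inline u16 iavf_example_itr_to_reg_interval(u16 setting)
{
	/* strip the dynamic flag and any odd bit, then scale to 2 usec units */
	return (ITR_TO_REG(setting) & IAVF_ITR_MASK) / IAVF_MIN_ITR;
}
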
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */

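/* Illustrative note, not part of the upstream header: IAVF_INTRL_8K (125
 * usec) encodes as INTRL_USEC_TO_REG(125) == (125 >> 2) | INTRL_ENA == 0x5F,
 * and INTRL_REG_TO_USEC(0x5F) decodes back to 124 usec because of the 4 usec
 * register resolution.
 */
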
#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR    IAVF_IDX_ITR0
#define IAVF_TX_ITR    IAVF_IDX_ITR1
#define IAVF_PE_ITR    IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256   256
#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048  2048
#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * e.g. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * e.g. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int iavf_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IAVF_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IAVF_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
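
/* Illustrative note, not part of the upstream header: on a typical 4K-page,
 * 64-bit build (assuming NET_SKB_PAD == 64, NET_IP_ALIGN == 2 and roughly
 * 320 bytes of skb_shared_info overhead), IAVF_2K_TOO_SMALL_WITH_PADDING is
 * false, so iavf_skb_pad() works on rx_buf_len = 1536 - 2 = 1534:
 * ALIGN(1534, 2048) == 2048 and SKB_WITH_OVERHEAD(2048) - 1534 leaves
 * roughly 194 bytes of headroom under these assumptions.
 */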

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
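
/* Example usage (assuming the descriptor status bit definitions from
 * iavf_type.h), e.g. to check whether the hardware has written back a
 * descriptor:
 *
 *	if (iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *		... descriptor is done ...
 */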

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))

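/* Illustrative note, not part of the upstream header: with the values above,
 * IAVF_MAX_DATA_PER_TXD is 16383 and IAVF_MAX_DATA_PER_TXD_ALIGNED is
 * 16383 & ~4095 == 12288, i.e. the 12K per-descriptor limit assumed by
 * iavf_txd_use_count() below.
 */
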
/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
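
/* Illustrative check of the math above, not part of the upstream header:
 * for size == 12288 (exactly 12K), (12288 * 85) >> 20 == 0, so one
 * descriptor is reported (the 4K - 1 of slack absorbs the remainder);
 * for size == 65536, (65536 * 85) >> 20 == 5, giving 6 descriptors,
 * which matches DIV_ROUND_UP(65536, 12288).
 */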

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16

struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum iavf_ring_state_t {
	__IAVF_TX_FDIR_INIT_DONE,
	__IAVF_TX_XPS_INIT_DONE,
	__IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for the virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT      0
#define IAVF_RX_DTYPE_HEADER_SPLIT  1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct iavf_tx_buffer *tx_bi;
		struct iavf_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* High bit set means dynamic; use accessor routines to read/write.
	 * The hardware only supports 2 usec resolution for the ITR registers.
	 * These values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
	return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
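
/* Illustrative note, not part of the upstream header: with 4K pages, a ring
 * using IAVF_RXBUFFER_3072 gets iavf_rx_pg_order() == 1 and therefore an
 * 8K iavf_rx_pg_size(), while IAVF_RXBUFFER_2048 stays on order-0 4K pages.
 */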

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}

/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */