cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i40e_xsk.c (17343B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Copyright(c) 2018 Intel Corporation. */
      3
      4#include <linux/bpf_trace.h>
      5#include <linux/stringify.h>
      6#include <net/xdp_sock_drv.h>
      7#include <net/xdp.h>
      8
      9#include "i40e.h"
     10#include "i40e_txrx_common.h"
     11#include "i40e_xsk.h"
     12
     13int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
     14{
     15	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
     16
     17	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
     18	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
     19}
     20
     21void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
     22{
     23	memset(rx_ring->rx_bi_zc, 0,
     24	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
     25}
     26
     27static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
     28{
     29	return &rx_ring->rx_bi_zc[idx];
     30}
     31
     32/**
     33 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
     34 * certain ring/qid
     35 * @vsi: Current VSI
     36 * @pool: buffer pool
     37 * @qid: Rx ring to associate buffer pool with
     38 *
     39 * Returns 0 on success, <0 on failure
     40 **/
     41static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
     42				struct xsk_buff_pool *pool,
     43				u16 qid)
     44{
     45	struct net_device *netdev = vsi->netdev;
     46	bool if_running;
     47	int err;
     48
     49	if (vsi->type != I40E_VSI_MAIN)
     50		return -EINVAL;
     51
     52	if (qid >= vsi->num_queue_pairs)
     53		return -EINVAL;
     54
     55	if (qid >= netdev->real_num_rx_queues ||
     56	    qid >= netdev->real_num_tx_queues)
     57		return -EINVAL;
     58
     59	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
     60	if (err)
     61		return err;
     62
     63	set_bit(qid, vsi->af_xdp_zc_qps);
     64
     65	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
     66
     67	if (if_running) {
     68		err = i40e_queue_pair_disable(vsi, qid);
     69		if (err)
     70			return err;
     71
     72		err = i40e_queue_pair_enable(vsi, qid);
     73		if (err)
     74			return err;
     75
     76		/* Kick start the NAPI context so that receiving will start */
     77		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
     78		if (err)
     79			return err;
     80	}
     81
     82	return 0;
     83}
     84
     85/**
     86 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
     87 * certain ring/qid
     88 * @vsi: Current VSI
     89 * @qid: Rx ring to disassociate the buffer pool from
     90 *
     91 * Returns 0 on success, <0 on failure
     92 **/
     93static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
     94{
     95	struct net_device *netdev = vsi->netdev;
     96	struct xsk_buff_pool *pool;
     97	bool if_running;
     98	int err;
     99
    100	pool = xsk_get_pool_from_qid(netdev, qid);
    101	if (!pool)
    102		return -EINVAL;
    103
    104	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
    105
    106	if (if_running) {
    107		err = i40e_queue_pair_disable(vsi, qid);
    108		if (err)
    109			return err;
    110	}
    111
    112	clear_bit(qid, vsi->af_xdp_zc_qps);
    113	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
    114
    115	if (if_running) {
    116		err = i40e_queue_pair_enable(vsi, qid);
    117		if (err)
    118			return err;
    119	}
    120
    121	return 0;
    122}
    123
    124/**
    125 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
    126 * a ring/qid
    127 * @vsi: Current VSI
    128 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
    129 * @qid: Rx ring to (dis)associate the buffer pool to/from
    130 *
    131 * This function enables or disables a buffer pool on a certain ring.
    132 *
    133 * Returns 0 on success, <0 on failure
    134 **/
    135int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
    136			u16 qid)
    137{
    138	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
    139		i40e_xsk_pool_disable(vsi, qid);
    140}
    141
    142/**
    143 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
    144 * @rx_ring: Rx ring
    145 * @xdp: xdp_buff used as input to the XDP program
    146 *
    147 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
    148 **/
    149static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
    150{
    151	int err, result = I40E_XDP_PASS;
    152	struct i40e_ring *xdp_ring;
    153	struct bpf_prog *xdp_prog;
    154	u32 act;
    155
    156	/* NB! xdp_prog will always be non-NULL, since this path is only
    157	 * enabled by setting an XDP program.
    158	 */
    159	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
    160	act = bpf_prog_run_xdp(xdp_prog, xdp);
    161
    162	if (likely(act == XDP_REDIRECT)) {
    163		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
    164		if (!err)
    165			return I40E_XDP_REDIR;
    166		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
    167			result = I40E_XDP_EXIT;
    168		else
    169			result = I40E_XDP_CONSUMED;
    170		goto out_failure;
    171	}
    172
    173	switch (act) {
    174	case XDP_PASS:
    175		break;
    176	case XDP_TX:
    177		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
    178		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
    179		if (result == I40E_XDP_CONSUMED)
    180			goto out_failure;
    181		break;
    182	case XDP_DROP:
    183		result = I40E_XDP_CONSUMED;
    184		break;
    185	default:
    186		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
    187		fallthrough;
    188	case XDP_ABORTED:
    189		result = I40E_XDP_CONSUMED;
    190out_failure:
    191		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
    192	}
    193	return result;
    194}
    195
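       /**
        * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the pool
        * @rx_ring: Rx ring
        * @count: The number of buffers to allocate
        *
        * Returns true if all allocations were successful, false if any fail.
        **/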
    196bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
    197{
    198	u16 ntu = rx_ring->next_to_use;
    199	union i40e_rx_desc *rx_desc;
    200	struct xdp_buff **xdp;
    201	u32 nb_buffs, i;
    202	dma_addr_t dma;
    203
    204	rx_desc = I40E_RX_DESC(rx_ring, ntu);
    205	xdp = i40e_rx_bi(rx_ring, ntu);
    206
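       	/* Cap the batch at the end of the ring: xsk_buff_alloc_batch() fills
       	 * consecutive slots starting at next_to_use, and any remainder is
       	 * left for a later call.
       	 */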
    207	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
    208	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
    209	if (!nb_buffs)
    210		return false;
    211
    212	i = nb_buffs;
    213	while (i--) {
    214		dma = xsk_buff_xdp_get_dma(*xdp);
    215		rx_desc->read.pkt_addr = cpu_to_le64(dma);
    216		rx_desc->read.hdr_addr = 0;
    217
    218		rx_desc++;
    219		xdp++;
    220	}
    221
    222	ntu += nb_buffs;
    223	if (ntu == rx_ring->count) {
    224		rx_desc = I40E_RX_DESC(rx_ring, 0);
    225		ntu = 0;
    226	}
    227
    228	/* clear the status bits for the next_to_use descriptor */
    229	rx_desc->wb.qword1.status_error_len = 0;
    230	i40e_release_rx_desc(rx_ring, ntu);
    231
    232	return count == nb_buffs;
    233}
    234
    235/**
    236 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
    237 * @rx_ring: Rx ring
    238 * @xdp: xdp_buff
    239 *
    240 * This function allocates a new skb from a zero-copy Rx buffer.
    241 *
    242 * Returns the skb, or NULL on failure.
    243 **/
    244static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
    245					     struct xdp_buff *xdp)
    246{
    247	unsigned int totalsize = xdp->data_end - xdp->data_meta;
    248	unsigned int metasize = xdp->data - xdp->data_meta;
    249	struct sk_buff *skb;
    250
    251	net_prefetch(xdp->data_meta);
    252
    253	/* allocate a skb to store the frags */
    254	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
    255			       GFP_ATOMIC | __GFP_NOWARN);
    256	if (unlikely(!skb))
    257		goto out;
    258
    259	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
    260	       ALIGN(totalsize, sizeof(long)));
    261
    262	if (metasize) {
    263		skb_metadata_set(skb, metasize);
    264		__skb_pull(skb, metasize);
    265	}
    266
    267out:
    268	xsk_buff_free(xdp);
    269	return skb;
    270}
    271
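       /* Propagate the verdict from i40e_run_xdp_zc(): TX/REDIR buffers are
        * completed later via xdp_xmit, I40E_XDP_EXIT flags a failure so the
        * poll loop backs off, CONSUMED frees the buffer, and PASS builds an
        * skb and hands it to the stack.
        */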
    272static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
    273				      struct xdp_buff *xdp_buff,
    274				      union i40e_rx_desc *rx_desc,
    275				      unsigned int *rx_packets,
    276				      unsigned int *rx_bytes,
    277				      unsigned int size,
    278				      unsigned int xdp_res,
    279				      bool *failure)
    280{
    281	struct sk_buff *skb;
    282
    283	*rx_packets = 1;
    284	*rx_bytes = size;
    285
    286	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
    287		return;
    288
    289	if (xdp_res == I40E_XDP_EXIT) {
    290		*failure = true;
    291		return;
    292	}
    293
    294	if (xdp_res == I40E_XDP_CONSUMED) {
    295		xsk_buff_free(xdp_buff);
    296		return;
    297	}
    298	if (xdp_res == I40E_XDP_PASS) {
    299		/* NB! We are not checking for errors using
    300		 * i40e_test_staterr with BIT(I40E_RXD_QW1_ERROR_SHIFT),
    301		 * because SBP is *not* set in PRT_SBPVSI (it is not set
    302		 * by default).
    303		 */
    304		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
    305		if (!skb) {
    306			rx_ring->rx_stats.alloc_buff_failed++;
    307			*rx_packets = 0;
    308			*rx_bytes = 0;
    309			return;
    310		}
    311
    312		if (eth_skb_pad(skb)) {
    313			*rx_packets = 0;
    314			*rx_bytes = 0;
    315			return;
    316		}
    317
    318		*rx_bytes = skb->len;
    319		i40e_process_skb_fields(rx_ring, rx_desc, skb);
    320		napi_gro_receive(&rx_ring->q_vector->napi, skb);
    321		return;
    322	}
    323
    324	/* Should never get here, as all valid cases have been handled already.
    325	 */
    326	WARN_ON_ONCE(1);
    327}
    328
    329/**
    330 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
    331 * @rx_ring: Rx ring
    332 * @budget: NAPI budget
    333 *
    334 * Returns amount of work completed
    335 **/
    336int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
    337{
    338	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
    339	u16 next_to_clean = rx_ring->next_to_clean;
    340	u16 count_mask = rx_ring->count - 1;
    341	unsigned int xdp_res, xdp_xmit = 0;
    342	bool failure = false;
    343	u16 cleaned_count;
    344
    345	while (likely(total_rx_packets < (unsigned int)budget)) {
    346		union i40e_rx_desc *rx_desc;
    347		unsigned int rx_packets;
    348		unsigned int rx_bytes;
    349		struct xdp_buff *bi;
    350		unsigned int size;
    351		u64 qword;
    352
    353		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
    354		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
    355
    356		/* This memory barrier is needed to keep us from reading
    357		 * any other fields out of the rx_desc until we have
    358		 * verified the descriptor has been written back.
    359		 */
    360		dma_rmb();
    361
    362		if (i40e_rx_is_programming_status(qword)) {
    363			i40e_clean_programming_status(rx_ring,
    364						      rx_desc->raw.qword[0],
    365						      qword);
    366			bi = *i40e_rx_bi(rx_ring, next_to_clean);
    367			xsk_buff_free(bi);
    368			next_to_clean = (next_to_clean + 1) & count_mask;
    369			continue;
    370		}
    371
    372		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
    373		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
    374		if (!size)
    375			break;
    376
    377		bi = *i40e_rx_bi(rx_ring, next_to_clean);
    378		xsk_buff_set_size(bi, size);
    379		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
    380
    381		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
    382		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
    383					  &rx_bytes, size, xdp_res, &failure);
    384		if (failure)
    385			break;
    386		total_rx_packets += rx_packets;
    387		total_rx_bytes += rx_bytes;
    388		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
    389		next_to_clean = (next_to_clean + 1) & count_mask;
    390	}
    391
    392	rx_ring->next_to_clean = next_to_clean;
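       	/* Number of descriptors that can be refilled; the ring always keeps
       	 * one unused slot between next_to_use and next_to_clean.
       	 */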
    393	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
    394
    395	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
    396		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
    397
    398	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
    399	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
    400
    401	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
    402		if (failure || next_to_clean == rx_ring->next_to_use)
    403			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
    404		else
    405			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
    406
    407		return (int)total_rx_packets;
    408	}
    409	return failure ? budget : (int)total_rx_packets;
    410}
    411
    412static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
    413			  unsigned int *total_bytes)
    414{
    415	struct i40e_tx_desc *tx_desc;
    416	dma_addr_t dma;
    417
    418	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
    419	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
    420
    421	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
    422	tx_desc->buffer_addr = cpu_to_le64(dma);
    423	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
    424						  0, desc->len, 0);
    425
    426	*total_bytes += desc->len;
    427}
    428
    429static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
    430				unsigned int *total_bytes)
    431{
    432	u16 ntu = xdp_ring->next_to_use;
    433	struct i40e_tx_desc *tx_desc;
    434	dma_addr_t dma;
    435	u32 i;
    436
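       	/* loop_unrolled_for is a driver helper macro that adds a compiler
       	 * loop-unroll hint where supported and falls back to a plain for
       	 * loop otherwise.
       	 */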
    437	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
    438		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
    439		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
    440
    441		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
    442		tx_desc->buffer_addr = cpu_to_le64(dma);
    443		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
    444							  I40E_TX_DESC_CMD_EOP,
    445							  0, desc[i].len, 0);
    446
    447		*total_bytes += desc[i].len;
    448	}
    449
    450	xdp_ring->next_to_use = ntu;
    451}
    452
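       /* Fill the HW Tx ring from the descriptor array: full PKTS_PER_BATCH
        * chunks go through the unrolled batch path, the remainder is sent one
        * packet at a time.
        */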
    453static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
    454				 unsigned int *total_bytes)
    455{
    456	u32 batched, leftover, i;
    457
    458	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
    459	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
    460	for (i = 0; i < batched; i += PKTS_PER_BATCH)
    461		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
    462	for (i = batched; i < batched + leftover; i++)
    463		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
    464}
    465
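       /* Set the RS (Report Status) bit on the last descriptor written so the
        * hardware reports completion for the whole batch.
        */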
    466static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
    467{
    468	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
    469	struct i40e_tx_desc *tx_desc;
    470
    471	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
    472	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
    473}
    474
    475/**
    476 * i40e_xmit_zc - Performs AF_XDP zero-copy Tx
    477 * @xdp_ring: XDP Tx ring
    478 * @budget: NAPI budget
    479 *
    480 * Returns true if the work is finished.
    481 **/
    482static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
    483{
    484	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
    485	u32 nb_pkts, nb_processed = 0;
    486	unsigned int total_bytes = 0;
    487
    488	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
    489	if (!nb_pkts)
    490		return true;
    491
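       	/* If the batch would run past the end of the ring, fill descriptors
       	 * up to the end first, wrap next_to_use to 0 and fill the remainder
       	 * below.
       	 */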
    492	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
    493		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
    494		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
    495		xdp_ring->next_to_use = 0;
    496	}
    497
    498	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
    499			     &total_bytes);
    500
    501	/* Request an interrupt for the last frame and bump tail ptr. */
    502	i40e_set_rs_bit(xdp_ring);
    503	i40e_xdp_ring_update_tail(xdp_ring);
    504
    505	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
    506
    507	return nb_pkts < budget;
    508}
    509
    510/**
    511 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
    512 * @tx_ring: XDP Tx ring
    513 * @tx_bi: Tx buffer info to clean
    514 **/
    515static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
    516				     struct i40e_tx_buffer *tx_bi)
    517{
    518	xdp_return_frame(tx_bi->xdpf);
    519	tx_ring->xdp_tx_active--;
    520	dma_unmap_single(tx_ring->dev,
    521			 dma_unmap_addr(tx_bi, dma),
    522			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
    523	dma_unmap_len_set(tx_bi, len, 0);
    524}
    525
    526/**
    527 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
    528 * @vsi: Current VSI
    529 * @tx_ring: XDP Tx ring
    530 *
    531 * Returns true if cleanup/transmission is done.
    532 **/
    533bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
    534{
    535	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
    536	u32 i, completed_frames, xsk_frames = 0;
    537	u32 head_idx = i40e_get_head(tx_ring);
    538	struct i40e_tx_buffer *tx_bi;
    539	unsigned int ntc;
    540
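       	/* The head write-back value may have wrapped relative to
       	 * next_to_clean; unwrap it so the subtraction below yields the
       	 * number of completed descriptors.
       	 */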
    541	if (head_idx < tx_ring->next_to_clean)
    542		head_idx += tx_ring->count;
    543	completed_frames = head_idx - tx_ring->next_to_clean;
    544
    545	if (completed_frames == 0)
    546		goto out_xmit;
    547
    548	if (likely(!tx_ring->xdp_tx_active)) {
    549		xsk_frames = completed_frames;
    550		goto skip;
    551	}
    552
    553	ntc = tx_ring->next_to_clean;
    554
    555	for (i = 0; i < completed_frames; i++) {
    556		tx_bi = &tx_ring->tx_bi[ntc];
    557
    558		if (tx_bi->xdpf) {
    559			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
    560			tx_bi->xdpf = NULL;
    561		} else {
    562			xsk_frames++;
    563		}
    564
    565		if (++ntc >= tx_ring->count)
    566			ntc = 0;
    567	}
    568
    569skip:
    570	tx_ring->next_to_clean += completed_frames;
    571	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
    572		tx_ring->next_to_clean -= tx_ring->count;
    573
    574	if (xsk_frames)
    575		xsk_tx_completed(bp, xsk_frames);
    576
    577	i40e_arm_wb(tx_ring, vsi, completed_frames);
    578
    579out_xmit:
    580	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
    581		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
    582
    583	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
    584}
    585
    586/**
    587 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
    588 * @dev: the netdevice
    589 * @queue_id: queue id to wake up
    590 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
    591 *
    592 * Returns <0 for errors, 0 otherwise.
    593 **/
    594int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
    595{
    596	struct i40e_netdev_priv *np = netdev_priv(dev);
    597	struct i40e_vsi *vsi = np->vsi;
    598	struct i40e_pf *pf = vsi->back;
    599	struct i40e_ring *ring;
    600
    601	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
    602		return -EAGAIN;
    603
    604	if (test_bit(__I40E_VSI_DOWN, vsi->state))
    605		return -ENETDOWN;
    606
    607	if (!i40e_enabled_xdp_vsi(vsi))
    608		return -EINVAL;
    609
    610	if (queue_id >= vsi->num_queue_pairs)
    611		return -EINVAL;
    612
    613	if (!vsi->xdp_rings[queue_id]->xsk_pool)
    614		return -EINVAL;
    615
    616	ring = vsi->xdp_rings[queue_id];
    617
    618	/* The idea here is that if NAPI is running, mark a miss, so
    619	 * it will run again. If not, trigger an interrupt and
    620	 * schedule the NAPI from interrupt context. If NAPI would be
    621	 * scheduled here, the interrupt affinity would not be
    622	 * honored.
    623	 */
    624	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
    625		i40e_force_wb(vsi, ring->q_vector);
    626
    627	return 0;
    628}
    629
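       /**
        * i40e_xsk_clean_rx_ring - Free the zero-copy Rx buffers left on the ring
        * @rx_ring: Rx ring
        **/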
    630void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
    631{
    632	u16 count_mask = rx_ring->count - 1;
    633	u16 ntc = rx_ring->next_to_clean;
    634	u16 ntu = rx_ring->next_to_use;
    635
    636	for ( ; ntc != ntu; ntc = (ntc + 1)  & count_mask) {
    637		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
    638
    639		xsk_buff_free(rx_bi);
    640	}
    641}
    642
    643/**
    644 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
    645 * @tx_ring: XDP Tx ring
    646 **/
    647void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
    648{
    649	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
    650	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
    651	struct i40e_tx_buffer *tx_bi;
    652	u32 xsk_frames = 0;
    653
    654	while (ntc != ntu) {
    655		tx_bi = &tx_ring->tx_bi[ntc];
    656
    657		if (tx_bi->xdpf)
    658			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
    659		else
    660			xsk_frames++;
    661
    662		tx_bi->xdpf = NULL;
    663
    664		ntc++;
    665		if (ntc >= tx_ring->count)
    666			ntc = 0;
    667	}
    668
    669	if (xsk_frames)
    670		xsk_tx_completed(bp, xsk_frames);
    671}
    672
    673/**
    674 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
    675 * buffer pool attached
    677 * @vsi: Current VSI
    677 *
    678 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
    679 **/
    680bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
    681{
    682	struct net_device *netdev = vsi->netdev;
    683	int i;
    684
    685	for (i = 0; i < vsi->num_queue_pairs; i++) {
    686		if (xsk_get_pool_from_qid(netdev, i))
    687			return true;
    688	}
    689
    690	return false;
    691}