cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

nfp_net_xsk.c (3952B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
      2/* Copyright (C) 2018 Netronome Systems, Inc */
      3/* Copyright (C) 2021 Corigine, Inc */
      4
      5#include <linux/dma-direction.h>
      6#include <linux/dma-mapping.h>
      7#include <linux/slab.h>
      8#include <net/xdp_sock_drv.h>
      9#include <trace/events/xdp.h>
     10
     11#include "nfp_app.h"
     12#include "nfp_net.h"
     13#include "nfp_net_dp.h"
     14#include "nfp_net_xsk.h"
     15
     16static void
     17nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
     18			  struct xdp_buff *xdp)
     19{
     20	unsigned int headroom;
     21
     22	headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);
     23
     24	rx_ring->rxds[idx].fld.reserved = 0;
     25	rx_ring->rxds[idx].fld.meta_len_dd = 0;
     26
     27	rx_ring->xsk_rxbufs[idx].xdp = xdp;
     28	rx_ring->xsk_rxbufs[idx].dma_addr =
     29		xsk_buff_xdp_get_frame_dma(xdp) + headroom;
     30}
     31
     32void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
     33{
     34	rxbuf->dma_addr = 0;
     35	rxbuf->xdp = NULL;
     36}
     37
     38void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
     39{
     40	if (rxbuf->xdp)
     41		xsk_buff_free(rxbuf->xdp);
     42
     43	nfp_net_xsk_rx_unstash(rxbuf);
     44}
     45
     46void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
     47{
     48	unsigned int i;
     49
     50	if (!rx_ring->cnt)
     51		return;
     52
     53	for (i = 0; i < rx_ring->cnt - 1; i++)
     54		nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
     55}
     56
/* Top up the RX free list with buffers taken from the queue's XSK pool.
 *
 * Fills descriptors until the ring has no more space or xsk_buff_alloc()
 * fails, then publishes the new producer count to the device in one go.
 */
void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	unsigned int wr_idx, wr_ptr_add = 0;
	struct xdp_buff *xdp;

	while (nfp_net_rx_space(rx_ring)) {
		/* Mask the free-running write pointer into a ring index. */
		wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

		xdp = xsk_buff_alloc(pool);
		if (!xdp)
			break;	/* pool exhausted; retry on a later fill */

		/* Track the buffer and reset the descriptor's DD/meta bits. */
		nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);

		nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
				      rx_ring->xsk_rxbufs[wr_idx].dma_addr);

		rx_ring->wr_p++;
		wr_ptr_add++;
	}

	/* Ensure all records are visible before incrementing write counter. */
	wmb();
	nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
}
     84
/* Count an RX drop on @r_vec's stats and release the associated buffer. */
void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
			 struct nfp_net_xsk_rx_buf *xrxbuf)
{
	/* Bump the drop counter inside the u64 stats write section so
	 * readers see a consistent snapshot.
	 */
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_xsk_rx_free(xrxbuf);
}
     94
     95static void nfp_net_xsk_pool_unmap(struct device *dev,
     96				   struct xsk_buff_pool *pool)
     97{
     98	return xsk_pool_dma_unmap(pool, 0);
     99}
    100
/* DMA-map an XSK buffer pool for @dev with no extra attributes (attrs == 0).
 * Returns 0 on success or a negative errno from xsk_pool_dma_map().
 */
static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_map(pool, dev, 0);
}
    105
/* Install or remove (@pool == NULL) an XSK buffer pool on @queue_id.
 *
 * Sequence: DMA-map the new pool, swap it into a cloned datapath config,
 * apply the config via ring reconfig, then unmap whatever pool was there
 * before.  On failure the new pool's mapping is rolled back.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported datapaths/FW, or a
 * negative errno from mapping/reconfig.
 */
int nfp_net_xsk_setup_pool(struct net_device *netdev,
			   struct xsk_buff_pool *pool, u16 queue_id)
{
	struct nfp_net *nn = netdev_priv(netdev);

	struct xsk_buff_pool *prev_pool;
	struct nfp_net_dp *dp;
	int err;

	/* NFDK doesn't implement xsk yet. */
	if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
		return -EOPNOTSUPP;

	/* Reject on old FWs so we can drop some checks on datapath. */
	if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		return -EOPNOTSUPP;
	if (!nn->dp.chained_metadata_format)
		return -EOPNOTSUPP;

	/* Install */
	if (pool) {
		err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
		if (err)
			return err;
	}

	/* Reconfig/swap: stage the pool in a cloned dp and apply it. */
	dp = nfp_net_clone_dp(nn);
	if (!dp) {
		err = -ENOMEM;
		goto err_unmap;
	}

	prev_pool = dp->xsk_pools[queue_id];
	dp->xsk_pools[queue_id] = pool;

	err = nfp_net_ring_reconfig(nn, dp, NULL);
	if (err)
		goto err_unmap;

	/* Uninstall: the old pool is no longer referenced, drop its mapping. */
	if (prev_pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);

	return 0;
err_unmap:
	/* Roll back the mapping done above; prev_pool stays installed. */
	if (pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, pool);

	return err;
}
    157
    158int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
    159{
    160	struct nfp_net *nn = netdev_priv(netdev);
    161
    162	/* queue_id comes from a zero-copy socket, installed with XDP_SETUP_XSK_POOL,
    163	 * so it must be within our vector range.  Moreover, our napi structs
    164	 * are statically allocated, so we can always kick them without worrying
    165	 * if reconfig is in progress or interface down.
    166	 */
    167	napi_schedule(&nn->r_vecs[queue_id].napi);
    168
    169	return 0;
    170}