cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

xdp_sock.h (2117B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2/* AF_XDP internal functions
      3 * Copyright(c) 2018 Intel Corporation.
      4 */
      5
      6#ifndef _LINUX_XDP_SOCK_H
      7#define _LINUX_XDP_SOCK_H
      8
      9#include <linux/bpf.h>
     10#include <linux/workqueue.h>
     11#include <linux/if_xdp.h>
     12#include <linux/mutex.h>
     13#include <linux/spinlock.h>
     14#include <linux/mm.h>
     15#include <net/sock.h>
     16
     17struct net_device;
     18struct xsk_queue;
     19struct xdp_buff;
     20
/*
 * UMEM: a user-space memory region registered for AF_XDP that supplies
 * the packet buffers used by one or more XDP sockets. A single umem may
 * be shared by several sockets (tracked via the users refcount).
 */
struct xdp_umem {
	void *addrs;		/* kernel-side mapping of the umem area */
	u64 size;		/* total size of the region in bytes */
	u32 headroom;		/* headroom reserved in front of each chunk */
	u32 chunk_size;		/* size of one packet buffer (chunk) */
	u32 chunks;		/* number of chunks the region is divided into */
	u32 npgs;		/* number of pages backing the region */
	struct user_struct *user;	/* owning user — presumably for locked-memory accounting; confirm in xdp_umem.c */
	refcount_t users;	/* references held on this umem */
	u8 flags;		/* flags supplied at registration */
	bool zc;		/* zero-copy mode in use */
	struct page **pgs;	/* pages backing the region */
	int id;			/* identifier for this umem */
	struct list_head xsk_dma_list;	/* DMA mappings created for this umem */
	struct work_struct work;	/* deferred work (teardown, per users of this field) */
};
     37
/*
 * XSKMAP: a BPF map whose entries are XDP sockets. The embedded
 * struct bpf_map must come first so the generic BPF map code can
 * container_of() back to the xsk_map.
 */
struct xsk_map {
	struct bpf_map map;	/* generic BPF map header (must be first) */
	spinlock_t lock; /* Synchronize map updates */
	/* Flexible array of RCU-protected socket slots, sized at map creation. */
	struct xdp_sock __rcu *xsk_map[];
};
     43
/*
 * An AF_XDP socket. Field placement is deliberate: the RX and TX ring
 * pointers each start a fresh cacheline (____cacheline_aligned_in_smp)
 * so the receive and transmit hot paths do not false-share.
 */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	/* RX ring; aligned to its own cacheline on SMP builds */
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;		/* device this socket is bound to */
	struct xdp_umem *umem;		/* registered umem backing the socket */
	struct list_head flush_node;	/* linkage for a flush list — presumably consumed by __xsk_map_flush(); confirm in xsk.c */
	struct xsk_buff_pool *pool;
	u16 queue_id;			/* device queue the socket is bound to */
	bool zc;			/* zero-copy mode enabled */
	enum {
		XSK_READY = 0,		/* created, not yet bound */
		XSK_BOUND,		/* bound to a device/queue */
		XSK_UNBOUND,		/* unbound again */
	} state;

	/* TX ring; separate cacheline from the RX fields above */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;	/* linkage on a TX socket list */
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;			/* frames dropped on receive */
	u64 rx_queue_full;		/* receives failed because the RX ring was full */

	struct list_head map_list;	/* XSKMAP entries referencing this socket */
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
     77
     78#ifdef CONFIG_XDP_SOCKETS
     79
/* Receive one XDP frame on the generic (copy) path; negative errno on failure. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Queue a frame to an XSKMAP-selected socket; negative errno on failure. */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Flush sockets with pending redirected frames at the end of NAPI processing — presumably drains flush_node entries; confirm in xsk.c. */
void __xsk_map_flush(void);
     83
     84#else
     85
     86static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
     87{
     88	return -ENOTSUPP;
     89}
     90
     91static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
     92{
     93	return -EOPNOTSUPP;
     94}
     95
     96static inline void __xsk_map_flush(void)
     97{
     98}
     99
    100#endif /* CONFIG_XDP_SOCKETS */
    101
    102#endif /* _LINUX_XDP_SOCK_H */