cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xsk.c (28906B)


// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/if_link.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

/* entire xsk.h and xsk.c is going away in libbpf 1.0, so ignore all internal
 * uses of deprecated APIs
 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

enum xsk_prog {
	XSK_PROG_FALLBACK,
	XSK_PROG_REDIRECT_FLAGS,
};

struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};

struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
	int prog_fd;
	int link_fd;
	int xsks_map_fd;
	char ifname[IFNAMSIZ];
	bool has_bpf_link;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* Up until and including Linux 5.3 */
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Up until and including Linux 5.3 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}
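
/* Illustrative sketch (not part of the original file): how a caller might
 * override the defaults applied by the two helpers above. The field values
 * are arbitrary examples; only the struct layout, the default macros and
 * the XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD flag come from xsk.h.
 */
#if 0
static void example_configs(struct xsk_umem_config *ucfg,
			    struct xsk_socket_config *scfg)
{
	ucfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; /* deeper fill ring */
	ucfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	ucfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	ucfg->frame_headroom = 0;
	ucfg->flags = 0;

	scfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	scfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	/* any bit other than INHIBIT_PROG_LOAD makes
	 * xsk_set_xdp_socket_config() return -EINVAL
	 */
	scfg->libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
	scfg->xdp_flags = 0;
	scfg->bind_flags = 0;
}
#endif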

static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
{
	struct xdp_ring_offset_v1 off_v1;

	/* getsockopt on a kernel <= 5.3 has no flags fields.
	 * Copy over the offsets to the correct places in the >=5.4 format
	 * and put the flags where they would have been on that kernel.
	 */
	memcpy(&off_v1, off, sizeof(off_v1));

	off->rx.producer = off_v1.rx.producer;
	off->rx.consumer = off_v1.rx.consumer;
	off->rx.desc = off_v1.rx.desc;
	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);

	off->tx.producer = off_v1.tx.producer;
	off->tx.consumer = off_v1.tx.consumer;
	off->tx.desc = off_v1.tx.desc;
	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);

	off->fr.producer = off_v1.fr.producer;
	off->fr.consumer = off_v1.fr.consumer;
	off->fr.desc = off_v1.fr.desc;
	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);

	off->cr.producer = off_v1.cr.producer;
	off->cr.consumer = off_v1.cr.consumer;
	off->cr.desc = off_v1.cr.desc;
	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
}
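
/* Worked example (illustrative values only): if a <= 5.3 kernel reported
 * off_v1.fr.consumer == 64, the fixup above yields off->fr.flags == 68,
 * i.e. the flags word is taken to sit one __u32 past the consumer index,
 * which is where the >= 5.4 ring layout placed it.
 */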

static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(*off))
		return 0;

	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
		xsk_mmap_offsets_v1(off);
		return 0;
	}

	return -EINVAL;
}

static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}
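
/* Illustrative sketch (not part of the original file): once
 * xsk_create_umem_rings() has mmapped the fill ring, an application
 * typically hands frames to the kernel through the xsk.h ring accessors.
 * The frame count and the "frame i lives at offset i * frame_size"
 * addressing scheme are example assumptions.
 */
#if 0
static void example_populate_fill_ring(struct xsk_ring_prod *fill,
					__u32 nb_frames, __u32 frame_size)
{
	__u32 idx, i;

	if (xsk_ring_prod__reserve(fill, nb_frames, &idx) != nb_frames)
		return; /* ring full */
	for (i = 0; i < nb_frames; i++)
		*xsk_ring_prod__fill_addr(fill, idx++) = (__u64)i * frame_size;
	xsk_ring_prod__submit(fill, nb_frames);
}
#endif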

DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
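
/* Illustrative sketch (not part of the original file): a minimal
 * xsk_umem__create() call. The buffer size and mmap flags are example
 * choices; the area must be page aligned, which mmap() guarantees.
 */
#if 0
static struct xsk_umem *example_create_umem(struct xsk_ring_prod *fill,
					    struct xsk_ring_cons *comp)
{
	__u64 size = 4096 * (__u64)XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_umem *umem = NULL;
	void *area;

	area = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return NULL;
	/* NULL config selects the defaults from xsk_set_umem_config() */
	if (xsk_umem__create(&umem, area, size, fill, comp, NULL)) {
		munmap(area, size);
		return NULL;
	}
	return umem;
}
#endif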

struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
					&config);
}

static enum xsk_prog get_xsk_prog(void)
{
	enum xsk_prog detected = XSK_PROG_FALLBACK;
	__u32 size_out, retval, duration;
	char data_in = 0, data_out;
	struct bpf_insn insns[] = {
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);

	map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (map_fd < 0)
		return detected;

	insns[0].imm = map_fd;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0) {
		close(map_fd);
		return detected;
	}

	ret = bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration);
	if (!ret && retval == XDP_PASS)
		detected = XSK_PROG_REDIRECT_FLAGS;
	close(prog_fd);
	close(map_fd);
	return detected;
}
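
/* For reference, the probe above corresponds roughly to this C program
 * (sketch; "xsks_map" stands in for the single-entry XSKMAP created in
 * get_xsk_prog()):
 *
 *	SEC("xdp") int probe(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, 0, XDP_PASS);
 *	}
 *
 * On kernels >= 5.3, bpf_redirect_map() returns the XDP_PASS passed in
 * the flags argument when the map lookup fails, so the test run yields
 * retval == XDP_PASS and XSK_PROG_REDIRECT_FLAGS is selected; older
 * kernels treat a non-zero flags argument as an error, so the fallback
 * program is used instead.
 */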

static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	struct xsk_ctx *ctx = xsk->ctx;
	char log_buf[log_buf_size];
	int prog_fd;

	/* This is the fallback C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int ret, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
	 *     if (ret > 0)
	 *         return ret;
	 *
	 *     // Fallback for pre-5.3 kernels, which do not support the
	 *     // default action in the flags parameter.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r2 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* if w0 > 0 goto pc+13 */
		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};

	/* This is the post-5.3 kernel C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
	 * }
	 */
	struct bpf_insn prog_redirect_flags[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt[] = {ARRAY_SIZE(prog),
			      ARRAY_SIZE(prog_redirect_flags),
	};
	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
	enum xsk_prog option = get_xsk_prog();
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_size,
	);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
				progs[option], insns_cnt[option], &opts);
	if (prog_fd < 0) {
		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	ctx->prog_fd = prog_fd;
	return 0;
}

static int xsk_create_bpf_link(struct xsk_socket *xsk)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int link_fd;
	int err;

	err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
	if (err) {
		pr_warn("getting XDP prog id failed\n");
		return err;
	}

	/* if there's a netlink-based XDP prog loaded on the interface, bail
	 * out and ask the user to remove it themselves
	 */
	if (prog_id) {
		pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
		return -EINVAL;
	}

	opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);

	link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
	if (link_fd < 0) {
		pr_warn("bpf_link_create failed: %s\n", strerror(errno));
		return link_fd;
	}

	ctx->link_fd = link_fd;
	return 0;
}

static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	libbpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	} else {
		/* Take the max of rx, tx, combined. Drivers return
		 * the number of channels in different ways.
		 */
		ret = max(channels.max_rx, channels.max_tx);
		ret = max(ret, (int)channels.max_combined);
	}

out:
	close(fd);
	return ret;
}

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
			    sizeof(int), sizeof(int), max_queues, NULL);
	if (fd < 0)
		return fd;

	ctx->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
	close(ctx->xsks_map_fd);
}

static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct xsk_ctx *ctx = xsk->ctx;
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	ctx->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		memset(&map_info, 0, map_len);
		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
			ctx->xsks_map_fd = fd;
			break;
		}

		close(fd);
	}

	if (ctx->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}

static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
				   &xsk->fd, 0);
}

static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
{
	struct bpf_link_info link_info;
	__u32 link_len;
	__u32 id = 0;
	int err;
	int fd;

	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_warn("can't get next link: %s\n", strerror(errno));
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
			err = -errno;
			break;
		}

		link_len = sizeof(struct bpf_link_info);
		memset(&link_info, 0, link_len);
		err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
		if (err) {
			pr_warn("can't get link info: %s\n", strerror(errno));
			close(fd);
			break;
		}
		if (link_info.type == BPF_LINK_TYPE_XDP) {
			if (link_info.xdp.ifindex == ifindex) {
				*link_fd = fd;
				if (prog_id)
					*prog_id = link_info.prog_id;
				break;
			}
		}
		close(fd);
	}

	return err;
}

static bool xsk_probe_bpf_link(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
	int ifindex_lo = 1;
	bool ret = false;
	int err;

	err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
	if (err)
		return ret;

	if (link_fd >= 0)
		return true;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return ret;

	link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
	close(prog_fd);

	if (link_fd >= 0) {
		ret = true;
		close(link_fd);
	}

	return ret;
}

static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
{
	char ifname[IFNAMSIZ];
	struct xsk_ctx *ctx;
	char *interface;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -ENOMEM;

	interface = if_indextoname(ifindex, &ifname[0]);
	if (!interface) {
		free(ctx);
		return -errno;
	}

	ctx->ifindex = ifindex;
	libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

	xsk->ctx = ctx;
	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();

	return 0;
}

static int xsk_init_xdp_res(struct xsk_socket *xsk,
			    int *xsks_map_fd)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	err = xsk_create_bpf_maps(xsk);
	if (err)
		return err;

	err = xsk_load_xdp_prog(xsk);
	if (err)
		goto err_load_xdp_prog;

	if (ctx->has_bpf_link)
		err = xsk_create_bpf_link(xsk);
	else
		err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd,
					  xsk->config.xdp_flags);

	if (err)
		goto err_attach_xdp_prog;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_bpf_maps;

	return err;

err_set_bpf_maps:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	else
		bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
err_attach_xdp_prog:
	close(ctx->prog_fd);
err_load_xdp_prog:
	xsk_delete_bpf_maps(xsk);
	return err;
}

static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (ctx->prog_fd < 0) {
		err = -errno;
		goto err_prog_fd;
	}
	err = xsk_lookup_bpf_maps(xsk);
	if (err)
		goto err_lookup_maps;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_maps;

	return err;

err_set_maps:
	close(ctx->xsks_map_fd);
err_lookup_maps:
	close(ctx->prog_fd);
err_prog_fd:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	return err;
}

static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
{
	struct xsk_socket *xsk = _xdp;
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int err;

	if (ctx->has_bpf_link)
		err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
	else
		err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);

	if (err)
		return err;

	err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
			 xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);

	if (!err && xsks_map_fd)
		*xsks_map_fd = ctx->xsks_map_fd;

	return err;
}

static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}

static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
	       sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
	       sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}

static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      const char *ifname, __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}

static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
{
	free(xsk->ctx);
	free(xsk);
}

int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
{
	xsk->ctx->xsks_map_fd = fd;
	return xsk_set_bpf_maps(xsk);
}
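
/* Illustrative sketch (not part of the original file): an application
 * that attaches its own XDP program can skip the built-in one with
 * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD and register the socket in its
 * own XSKMAP via xsk_socket__update_xskmap(). "my_xskmap_fd" is a
 * placeholder for a map fd obtained elsewhere.
 */
#if 0
static int example_use_own_prog(struct xsk_socket *xsk, int my_xskmap_fd)
{
	/* assumes the socket was created with
	 * config.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD
	 */
	return xsk_socket__update_xskmap(xsk, my_xskmap_fd);
}
#endif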

int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
{
	struct xsk_socket *xsk;
	int res;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	res = xsk_create_xsk_struct(ifindex, xsk);
	if (res) {
		free(xsk);
		return -EINVAL;
	}

	res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);

	xsk_destroy_xsk_struct(xsk);

	return res;
}

int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err, ifindex;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	xsk->outstanding_tx = 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex) {
		err = -errno;
		goto out_xsk_alloc;
	}

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
				     fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;
	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();

	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	ctx->prog_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = __xsk_setup_xdp_prog(xsk, NULL);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
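
/* For reference, the xsk_prod_nb_free() helper in xsk.h that the
 * cached_cons comment above points at looks roughly like this sketch:
 *
 *	free_entries = r->cached_cons - r->cached_prod;
 *	if (free_entries >= nb)
 *		return free_entries;
 *	r->cached_cons = *r->consumer + r->size;
 *	return r->cached_cons - r->cached_prod;
 *
 * Biasing cached_cons by r->size turns the common-case free-space check
 * into a single subtraction instead of
 * "r->cached_cons - r->cached_prod + r->size".
 */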

int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
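
/* Illustrative sketch (not part of the original file): a minimal receive
 * loop over a socket created above, using the xsk.h ring accessors. The
 * batch size and the recycle-every-frame policy are example choices.
 */
#if 0
static void example_rx_loop(struct xsk_ring_cons *rx,
			    struct xsk_ring_prod *fill, void *umem_area)
{
	__u32 idx_rx = 0, idx_fq = 0, rcvd, i;

	rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);
	if (!rcvd)
		return;
	/* recycle every frame straight back to the fill ring */
	while (xsk_ring_prod__reserve(fill, rcvd, &idx_fq) != rcvd)
		; /* busy-wait; a real application would poll() here */
	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		void *pkt = xsk_umem__get_data(umem_area, desc->addr);

		(void)pkt; /* process desc->len bytes of packet data */
		*xsk_ring_prod__fill_addr(fill, idx_fq++) = desc->addr;
	}
	xsk_ring_prod__submit(fill, rcvd);
	xsk_ring_cons__release(rx, rcvd);
}
#endif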

int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;
	if (ctx->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
		if (ctx->has_bpf_link)
			close(ctx->link_fd);
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	xsk_put_ctx(ctx, true);

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}
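
/* Illustrative sketch (not part of the original file): teardown order.
 * Sockets must go before the umem; xsk_umem__delete() refuses to tear
 * down a umem that still has sockets attached (refcount != 0, -EBUSY).
 */
#if 0
static void example_teardown(struct xsk_socket *xsk, struct xsk_umem *umem)
{
	xsk_socket__delete(xsk);	/* drops umem->refcount */
	xsk_umem__delete(umem);		/* now succeeds, refcount is 0 */
}
#endif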