cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xskmap.c (6820B)


// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/btf_ids.h>

#include "xsk.h"
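
/* Allocate a tracking node that ties an XSK socket to the map slot it
 * occupies. The node pins the map with bpf_map_inc() so the map outlives
 * the socket's back-reference to it.
 */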
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *node;

	node = bpf_map_kzalloc(&map->map, sizeof(*node),
			       GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	bpf_map_inc(&map->map);

	node->map = map;
	node->map_entry = map_entry;
	return node;
}
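
/* Drop the map reference taken in xsk_map_node_alloc() and free the node. */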
static void xsk_map_node_free(struct xsk_map_node *node)
{
	bpf_map_put(&node->map->map);
	kfree(node);
}
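
/* Record on the socket's map_list that this socket is referenced from the
 * given map slot, so the entry can be cleared when the socket is released.
 */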
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}
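
/* Remove every tracking node for the given map slot from the socket's
 * map_list.
 */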
static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}
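
/* Create an XSKMAP: a flat array of RCU-protected xdp_sock pointers.
 * Keys and values are both 4 bytes (array index and socket fd).
 */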
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int numa_node;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);
	spin_lock_init(&m->lock);

	return &m->map;
}
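
/* Free the map. synchronize_net() ensures that no XDP/NAPI context can
 * still be dereferencing map entries when the memory is released.
 */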
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	synchronize_net();
	bpf_map_area_free(m);
}
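
/* Standard array-map iteration: an out-of-range (or missing) key restarts
 * at index 0; the final index returns -ENOENT to terminate iteration.
 */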
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
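
/* Emit inline BPF for bpf_map_lookup_elem(): bounds-check the index, scale
 * it to a pointer-sized offset into the xsk_map array and load the entry,
 * or return NULL when the index is out of range.
 */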
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	if (key >= map->max_entries)
		return NULL;

	return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}
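
/* Lookup callback used when a BPF program reads the map. */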
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}
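
/* Socket pointers must not leak to user space, so lookups through the
 * bpf() syscall are rejected.
 */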
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
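
/* Bind the XSK socket behind the fd in @value to slot @key. The tracking
 * node is allocated up front so that nothing can fail after the new entry
 * has been published with rcu_assign_pointer().
 */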
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *xs, *old_xs;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	rcu_assign_pointer(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}
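
/* Clear slot @key: swap the entry out atomically with xchg() under the map
 * lock, then unlink it from the old socket's map_list.
 */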
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *old_xs;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = unrcu_pointer(xchg(map_entry, NULL));
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}
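
/* XDP redirect target: hand the packet to the socket stored at the map
 * index passed in @ifindex, via the common redirect helper.
 */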
static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
				      __xsk_map_lookup_elem);
}
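
/* Called on socket release: clear @map_entry, but only if it still points
 * at @xs, since the slot may have been updated to a different socket in
 * the meantime.
 */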
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock __rcu **map_entry)
{
	spin_lock_bh(&map->lock);
	if (rcu_access_pointer(*map_entry) == xs) {
		rcu_assign_pointer(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}
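
/* For map-in-map, inner XSKMAPs must also match in max_entries, since
 * lookups can be inlined (see xsk_map_gen_lookup()) with the bounds check
 * baked into the generated instructions.
 */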
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
		bpf_map_meta_equal(meta0, meta1);
}
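
/* Map operations registered with the BPF core for BPF_MAP_TYPE_XSKMAP. */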
BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &xsk_map_btf_ids[0],
	.map_redirect = xsk_map_redirect,
};