cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

devmap.c (29659B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that contain
 * a reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether the ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it
 * is possible that the initially referenced dev is no longer in the map. As
 * the notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only the lookup and insertion are different.
 */
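
/* A minimal sketch of how a devmap is typically driven, assuming libbpf
 * BTF-defined maps and an illustrative map name "tx_ports" (neither is
 * defined in this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect(struct xdp_md *ctx)
 *	{
 *		// redirect to the device in slot 0; the low bits of the
 *		// flags select the fallback action if the slot is empty
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 *
 * User space populates slot 0 with an egress ifindex through the usual map
 * update path, e.g. bpf_map_update_elem(map_fd, &key, &ifindex, 0).
 */
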
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

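/* n_buckets is always a power of two (see dev_map_init_map() below), so the
 * mask here is equivalent to idx % n_buckets; e.g. with n_buckets == 8,
 * idx == 10 (0b1010) lands in bucket 2.
 */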
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
	}

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}
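
/* The two value sizes accepted above correspond to the UAPI layout in
 * include/uapi/linux/bpf.h; writing only the first 4 bytes means "no
 * egress program":
 *
 *	struct bpf_devmap_val {
 *		__u32 ifindex;     // device index
 *		union {
 *			int   fd;  // prog fd on map write
 *			__u32 id;  // prog id on map read
 *		} bpf_prog;
 *	};
 */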

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();
	/* Make sure prior calls to __dev_map_entry_free() have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
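
/* A sketch of the resulting user-space iteration protocol (libbpf syscall
 * wrappers; error handling elided): passing NULL or an out-of-range key
 * restarts at key 0, and the last key reports an error (-ENOENT):
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		key = next;
 *		// look up / act on key here
 *	}
 */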

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}
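
/* The per-entry program run above is an ordinary XDP program loaded with
 * expected_attach_type BPF_XDP_DEVMAP (see the check in
 * __dev_map_alloc_node() below). A sketch, assuming a libbpf version that
 * recognizes the SEC("xdp/devmap") section name; the filtering logic is
 * purely illustrative:
 *
 *	SEC("xdp/devmap")
 *	int xdp_egress(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex is valid for this attach type
 *		if (ctx->egress_ifindex == 42)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 *
 * Its fd is supplied through bpf_devmap_val.bpf_prog.fd at map update time.
 */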

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 * variable access, and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);
	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}
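
/* With DEV_MAP_BULK_SIZE at its current value of 16, a NAPI poll that
 * redirects, say, 64 frames to one device costs roughly four
 * ndo_xdp_xmit() calls (three from the overflow check in bq_enqueue()
 * above plus the final xdp_do_flush()) instead of one call per frame,
 * amortizing the per-send driver overhead across the bulk.
 */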

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdpf->len);
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj ||
	    !obj->dev->netdev_ops->ndo_xdp_xmit)
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}
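
/* The broadcast path above is reached when the redirecting program passes
 * BPF_F_BROADCAST, e.g. (a sketch reusing the illustrative "tx_ports" map
 * from the comment near the top of this file):
 *
 *	return bpf_redirect_map(&tx_ports, 0, BPF_F_BROADCAST |
 *				BPF_F_EXCLUDE_INGRESS);
 *
 * With BPF_F_BROADCAST the key is ignored and the frame is cloned to every
 * valid entry; BPF_F_EXCLUDE_INGRESS additionally skips the receiving
 * device and its upper devices, as collected by get_upper_ifindexes().
 */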

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we
	 * just return 0 even if the packet is dropped. The helper below takes
	 * care of freeing the skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;

		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (!dst)
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								*(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure RCU critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
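
/* Note that BPF_NOEXIST is rejected above: every slot of the array flavour
 * conceptually always exists, so for example
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
 *
 * fails with -EEXIST, while BPF_ANY (0) and BPF_EXIST updates go through
 * the xchg() path above.
 */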

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				     void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					 map, key, value, map_flags);
}

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);