cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

offload.c (16653B)


/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

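/* A netdev is usable as an offload target only if it implements the
 * ndo_bpf callback; a NULL netdev means the requested ifindex did not
 * resolve to a device.
 */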
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

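/* Called at BPF_PROG_LOAD time for device-bound programs: resolve
 * attr->prog_ifindex in the loader's netns, make sure the device is
 * registered for offload, and link the program into the device's list
 * of offloaded programs. The netdev reference is dropped once the
 * program is linked; teardown is serialized via bpf_devs_lock.
 */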
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

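/* The verifier entry points below proxy into the device driver's
 * bpf_prog_offload_ops. -ENODEV is returned if the program has lost
 * its device (e.g. the netdev was unregistered) in the meantime.
 */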
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

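/* Per-instruction verification hook, invoked by the core verifier for
 * every instruction of a device-bound program.
 */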
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

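/* The two hooks below mirror the verifier's dead code elimination on
 * the device. They do not return an error: if the driver cannot apply
 * an optimization, opt_failed is latched and all further optimization
 * attempts on this program are skipped.
 */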
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

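/* Must be called with bpf_devs_lock held for writing. */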
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

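/* bpf_func is pointed at bpf_prog_warn_on_exec so that an accidental
 * host-side invocation of a device-bound program warns loudly instead
 * of running code that only exists on the device.
 */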
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

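/* ns_get_path_cb() callback: record the program's ifindex and return
 * the offload device's network namespace (with a reference held), or
 * NULL if the program is no longer bound to a device.
 */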
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

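/* Issue a map offload command to the device via its ndo_bpf op. */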
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

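/* Offloaded maps are limited to array and hash types and require
 * CAP_SYS_ADMIN; the backing storage lives on the device, allocated
 * through BPF_OFFLOAD_MAP_ALLOC. RTNL is taken before bpf_devs_lock,
 * in keeping with the ordering documented at the top of this file,
 * because the ndo_bpf call requires RTNL.
 */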
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

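/* The element accessors below forward to the driver's map ops and
 * fail with -ENODEV once the map's netdev has gone away.
 */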
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

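/* A program matches a netdev if it is bound to that exact device, or
 * if both the program's device and the target netdev belong to the
 * same bpf_offload_dev (e.g. two ports of the same NIC).
 */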
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

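/* A device-bound program may only be used with maps offloaded to the
 * same device; maps that are offload-neutral are allowed regardless.
 */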
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

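/* Called under RTNL when a registered netdev goes away. Offloaded
 * programs and maps are migrated to another netdev of the same offload
 * device if one exists, otherwise they are destroyed.
 */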
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

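/* The offdevs rhashtable is initialized lazily on first device
 * creation, with offdevs_inited guarding against double init under
 * bpf_devs_lock.
 */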
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);