cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cls_bpf.c (17348B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

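/*
 * Per-classifier state: an RCU-protected list of attached programs plus
 * the IDR that hands out and tracks 32-bit filter handles.
 */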
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

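/*
 * Per-packet hot path, run under RCU in softirq context. Programs execute
 * in list order until one returns a verdict. At ingress the MAC header is
 * pushed back first so the program sees the same packet layout as at
 * egress. Program return values: 0 means no match (try the next program),
 * -1 selects the classid configured via TCA_BPF_CLASSID, and any other
 * value is used directly as the classid. In direct-action
 * (exts_integrated) mode the return value is instead a TC_ACT_* opcode,
 * sanitized by cls_bpf_exec_opcode() above.
 */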
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}
		if (unlikely(!skb->tstamp && skb->mono_delivery_time))
			skb->mono_delivery_time = 0;

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

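/*
 * Hardware offload plumbing. cls_bpf_offload_cmd() translates an
 * add/replace/destroy of a program into tc_setup_cb_*() driver callbacks;
 * if installing a new program fails, it rolls back by re-issuing the
 * command with old and new swapped. With TCA_CLS_FLAGS_SKIP_SW set,
 * failure to land in hardware is fatal (-EINVAL), since no software
 * fallback will run.
 */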
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

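/*
 * Lifecycle: per-tp state is allocated in cls_bpf_init() and torn down via
 * cls_bpf_destroy(). Program teardown is deferred through tcf_queue_work()
 * so a concurrent classify walking the RCU list never sees a freed
 * program; the work item retakes the RTNL lock before releasing the
 * extensions and the program itself.
 */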
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

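/*
 * Two load paths: classic BPF arrives as a raw opcode array via
 * TCA_BPF_OPS and is translated by bpf_prog_create(); eBPF arrives as the
 * fd of an already-verified BPF_PROG_TYPE_SCHED_CLS program via
 * TCA_BPF_FD. The two are mutually exclusive (enforced in
 * cls_bpf_set_parms() below).
 */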
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

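/*
 * Create/replace path (RTNL held): parse TCA_OPTIONS, allocate a handle in
 * the IDR (the caller-supplied one, or the first free handle starting at
 * 1), validate parameters, attempt hardware offload, then publish the new
 * program with an RCU list insert or replace. A replaced program is freed
 * via the deferred work described above.
 */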
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

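/*
 * Netlink dump: mirrors the attributes accepted by bpf_policy back to
 * userspace. Classic programs dump their opcode array; eBPF programs dump
 * the object name plus the kernel-assigned program id and tag, which
 * userspace can use to look up the loaded program.
 */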
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

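/*
 * Called when an offload callback is registered or unregistered on the
 * block: replays every non-skip_hw program to that callback (add) or
 * removes it (!add) so hardware state stays in sync with software state.
 */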
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

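/*
 * Registered as classifier kind "bpf": this is the backend behind
 * "tc filter add ... bpf ...".
 */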
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
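
/*
 * Illustrative userspace usage via iproute2 (exact syntax varies by
 * version):
 *
 *   tc qdisc add dev eth0 clsact
 *   # eBPF object in direct-action mode (TCA_BPF_FLAG_ACT_DIRECT):
 *   tc filter add dev eth0 ingress bpf direct-action obj prog.o sec classifier
 *   # classic BPF opcode array (TCA_BPF_OPS), e.g. as emitted by bpf_asm:
 *   tc filter add dev eth0 parent ffff: bpf bytecode '...' flowid 1:1
 */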