cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cls_matchall.c (10459B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

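/*
 * Per-filter state. matchall allows at most one filter per tcf_proto
 * instance, so tp->root points directly at this head (or is NULL).
 *
 * Illustrative userspace usage (not from this file; assumes a clsact
 * qdisc on eth0):
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall action drop
 */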
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

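/*
 * Every packet matches. Unless the filter is hardware-only (skip_sw),
 * record a software hit in the per-CPU counter and run the attached
 * actions.
 */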
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

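/*
 * Final teardown of a filter. Called either directly (when the actions
 * hold no netns reference) or from a workqueue after an RCU grace
 * period; the deferred path must retake the RTNL lock itself.
 */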
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

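/* Ask the offload layer to remove the filter from all bound hardware. */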
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

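/*
 * Offer the filter to hardware. A failure to translate the actions is
 * fatal only when the user requested skip_sw; a filter that ends up in
 * neither software nor hardware is rejected with -EINVAL.
 */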
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

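/*
 * Tear down the (single) filter: unbind it from its class, remove any
 * hardware instance, and free it once readers are done.
 */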
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

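/* Validate the actions and, if a classid was given, bind the filter to it. */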
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, u32 flags, u32 fl_flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

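/*
 * Create the filter. Because matchall matches everything, only one
 * filter per tcf_proto makes sense: a second add fails with -EEXIST,
 * and in-place modification is not supported.
 */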
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
			     flags, new->flags, extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

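/*
 * The single filter cannot be deleted on its own, only together with
 * its tcf_proto: mark it as going away and report it was the last
 * filter, so the core tears down the whole chain.
 */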
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

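/* Walk the "list" of filters, which holds at most one entry. */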
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

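/*
 * Replay the filter to a single newly bound (add) or unbinding (!add)
 * offload callback, e.g. when a device joins or leaves a shared block.
 */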
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);

		return add && tc_skip_sw(head->flags) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	return err;
}

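/* Pull hardware counters for the filter and fold them into the stats
 * of its attached actions.
 */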
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
				 cls_mall.stats.pkts, cls_mall.stats.drops,
				 cls_mall.stats.lastused,
				 cls_mall.stats.used_hw_stats,
				 cls_mall.stats.used_hw_stats_valid);
}

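/*
 * Dump the filter to netlink: classid, flags, the per-CPU hit counters
 * summed into one tc_matchall_pcnt, and the attached actions.
 */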
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

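/* Re-point or drop the filter's class binding when its class changes. */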
static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &head->res, base);
		else
			__tcf_unbind_filter(q, &head->res);
	}
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");