cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

sch_ingress.c (7640B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* net/sched/sch_ingress.c - Ingress and clsact qdisc
      3 *
      4 * Authors:     Jamal Hadi Salim 1999
      5 */
      6
      7#include <linux/module.h>
      8#include <linux/types.h>
      9#include <linux/list.h>
     10#include <linux/skbuff.h>
     11#include <linux/rtnetlink.h>
     12
     13#include <net/netlink.h>
     14#include <net/pkt_sched.h>
     15#include <net/pkt_cls.h>
     16
/* Per-qdisc private state for the "ingress" qdisc (stored in qdisc_priv()). */
struct ingress_sched_data {
	struct tcf_block *block;		/* filter chain container */
	struct tcf_block_ext_info block_info;	/* binding info for tcf_block_get_ext() */
	struct mini_Qdisc_pair miniqp;		/* pair swapped on chain-head change */
};
     22
/* Ingress has no child qdiscs to graft; always report "no leaf". */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
     27
     28static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
     29{
     30	return TC_H_MIN(classid) + 1;
     31}
     32
     33static unsigned long ingress_bind_filter(struct Qdisc *sch,
     34					 unsigned long parent, u32 classid)
     35{
     36	return ingress_find(sch, classid);
     37}
     38
/* Nothing to release when a filter drops its class binding. */
static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
     42
/* No classes to enumerate; class walks are a no-op for ingress. */
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
     46
     47static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
     48					   struct netlink_ext_ack *extack)
     49{
     50	struct ingress_sched_data *q = qdisc_priv(sch);
     51
     52	return q->block;
     53}
     54
     55static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
     56{
     57	struct mini_Qdisc_pair *miniqp = priv;
     58
     59	mini_qdisc_pair_swap(miniqp, tp_head);
     60};
     61
     62static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
     63{
     64	struct ingress_sched_data *q = qdisc_priv(sch);
     65
     66	q->block_info.block_index = block_index;
     67}
     68
     69static u32 ingress_ingress_block_get(struct Qdisc *sch)
     70{
     71	struct ingress_sched_data *q = qdisc_priv(sch);
     72
     73	return q->block_info.block_index;
     74}
     75
/* Set up the ingress qdisc: enable the ingress static key, wire up the
 * mini_Qdisc pair used by the RX path, and acquire the filter block.
 *
 * Returns 0 on success or a negative errno from tcf_block_get_ext().
 *
 * NOTE(review): net_inc_ingress_queue() runs before the fallible
 * tcf_block_get_ext(); this appears to rely on ->destroy() being invoked
 * on init failure to balance the counter — confirm against qdisc_create().
 */
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	net_inc_ingress_queue();

	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

	/* Have chain-head changes rewire the mini_Qdisc pair above. */
	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp, q->block);

	return 0;
}
     99
/* Tear down in reverse order of ingress_init(): release the filter
 * block first, then drop the ingress static-key reference.
 */
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->block, sch, &q->block_info);
	net_dec_ingress_queue();
}
    107
    108static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
    109{
    110	struct nlattr *nest;
    111
    112	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
    113	if (nest == NULL)
    114		goto nla_put_failure;
    115
    116	return nla_nest_end(skb, nest);
    117
    118nla_put_failure:
    119	nla_nest_cancel(skb, nest);
    120	return -1;
    121}
    122
/* Class operations: ingress is effectively classless, so most hooks are
 * stubs that exist only to satisfy the tc class/filter plumbing.
 */
static const struct Qdisc_class_ops ingress_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	ingress_find,
	.walk		=	ingress_walk,
	.tcf_block	=	ingress_tcf_block,
	.bind_tcf	=	ingress_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};
    132
/* Qdisc registration for "ingress": no enqueue/dequeue — it only hosts
 * filters; TCQ_F_CPUSTATS gives it per-CPU statistics.
 */
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops			=	&ingress_class_ops,
	.id			=	"ingress",
	.priv_size		=	sizeof(struct ingress_sched_data),
	.static_flags		=	TCQ_F_CPUSTATS,
	.init			=	ingress_init,
	.destroy		=	ingress_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	ingress_ingress_block_set,
	.ingress_block_get	=	ingress_ingress_block_get,
	.owner			=	THIS_MODULE,
};
    145
/* Per-qdisc private state for "clsact": like ingress_sched_data but with
 * a second block/miniq pair for the egress direction.
 */
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};
    154
    155static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
    156{
    157	switch (TC_H_MIN(classid)) {
    158	case TC_H_MIN(TC_H_MIN_INGRESS):
    159	case TC_H_MIN(TC_H_MIN_EGRESS):
    160		return TC_H_MIN(classid);
    161	default:
    162		return 0;
    163	}
    164}
    165
    166static unsigned long clsact_bind_filter(struct Qdisc *sch,
    167					unsigned long parent, u32 classid)
    168{
    169	return clsact_find(sch, classid);
    170}
    171
    172static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
    173					  struct netlink_ext_ack *extack)
    174{
    175	struct clsact_sched_data *q = qdisc_priv(sch);
    176
    177	switch (cl) {
    178	case TC_H_MIN(TC_H_MIN_INGRESS):
    179		return q->ingress_block;
    180	case TC_H_MIN(TC_H_MIN_EGRESS):
    181		return q->egress_block;
    182	default:
    183		return NULL;
    184	}
    185}
    186
    187static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
    188{
    189	struct clsact_sched_data *q = qdisc_priv(sch);
    190
    191	q->ingress_block_info.block_index = block_index;
    192}
    193
    194static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
    195{
    196	struct clsact_sched_data *q = qdisc_priv(sch);
    197
    198	q->egress_block_info.block_index = block_index;
    199}
    200
    201static u32 clsact_ingress_block_get(struct Qdisc *sch)
    202{
    203	struct clsact_sched_data *q = qdisc_priv(sch);
    204
    205	return q->ingress_block_info.block_index;
    206}
    207
    208static u32 clsact_egress_block_get(struct Qdisc *sch)
    209{
    210	struct clsact_sched_data *q = qdisc_priv(sch);
    211
    212	return q->egress_block_info.block_index;
    213}
    214
/* Set up the clsact qdisc: like ingress_init(), but with a second filter
 * block and mini_Qdisc pair for the egress (TX) direction.
 *
 * Returns 0 on success or a negative errno from tcf_block_get_ext().
 *
 * NOTE(review): both static keys are bumped before either fallible
 * tcf_block_get_ext() call, and a failure on the egress block leaves the
 * ingress block acquired; this appears to rely on ->destroy() running on
 * init failure to unwind — confirm against qdisc_create().
 */
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	/* Ingress direction. */
	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

	/* Egress direction. */
	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}
    246
/* Tear down in reverse order of clsact_init(): release both filter
 * blocks first, then drop both static-key references.
 */
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

	net_dec_ingress_queue();
	net_dec_egress_queue();
}
    257
/* Class operations for clsact; reuses the ingress stubs where the
 * behavior is identical (leaf/walk/unbind are no-ops for both).
 */
static const struct Qdisc_class_ops clsact_class_ops = {
	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf		=	ingress_leaf,
	.find		=	clsact_find,
	.walk		=	ingress_walk,
	.tcf_block	=	clsact_tcf_block,
	.bind_tcf	=	clsact_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};
    267
/* Qdisc registration for "clsact"; shares the ingress dump since clsact
 * also carries no options.
 */
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops			=	&clsact_class_ops,
	.id			=	"clsact",
	.priv_size		=	sizeof(struct clsact_sched_data),
	.static_flags		=	TCQ_F_CPUSTATS,
	.init			=	clsact_init,
	.destroy		=	clsact_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	clsact_ingress_block_set,
	.egress_block_set	=	clsact_egress_block_set,
	.ingress_block_get	=	clsact_ingress_block_get,
	.egress_block_get	=	clsact_egress_block_get,
	.owner			=	THIS_MODULE,
};
    282
    283static int __init ingress_module_init(void)
    284{
    285	int ret;
    286
    287	ret = register_qdisc(&ingress_qdisc_ops);
    288	if (!ret) {
    289		ret = register_qdisc(&clsact_qdisc_ops);
    290		if (ret)
    291			unregister_qdisc(&ingress_qdisc_ops);
    292	}
    293
    294	return ret;
    295}
    296
/* Unregister both qdiscs on module unload. */
static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}
    302
module_init(ingress_module_init);
module_exit(ingress_module_exit);

/* Alias so that requesting the clsact qdisc also loads this module. */
MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");