cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

sch_drr.c (11466B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * net/sched/sch_drr.c         Deficit Round Robin scheduler
      4 *
      5 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
      6 */
      7
      8#include <linux/module.h>
      9#include <linux/slab.h>
     10#include <linux/init.h>
     11#include <linux/errno.h>
     12#include <linux/netdevice.h>
     13#include <linux/pkt_sched.h>
     14#include <net/sch_generic.h>
     15#include <net/pkt_sched.h>
     16#include <net/pkt_cls.h>
     17
/* Per-class state for the Deficit Round Robin scheduler. */
struct drr_class {
	struct Qdisc_class_common	common;		/* classid + class-hash linkage */
	unsigned int			filter_cnt;	/* tc filters bound to this class */

	struct gnet_stats_basic_sync		bstats;	/* bytes/packets sent */
	struct gnet_stats_queue		qstats;		/* drops etc. */
	struct net_rate_estimator __rcu *rate_est;	/* optional rate estimator (TCA_RATE) */
	struct list_head		alist;		/* link in drr_sched::active while backlogged */
	struct Qdisc			*qdisc;		/* inner per-class qdisc */

	u32				quantum;	/* bytes credited per round */
	u32				deficit;	/* bytes this class may still send this round */
};
     31
/* Qdisc-private state, allocated by the core (priv_size below). */
struct drr_sched {
	struct list_head		active;		/* round-robin list of backlogged classes */
	struct tcf_proto __rcu		*filter_list;	/* attached classifier chain */
	struct tcf_block		*block;		/* tcf block owning filter_list */
	struct Qdisc_class_hash		clhash;		/* classid -> drr_class hash */
};
     38
     39static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
     40{
     41	struct drr_sched *q = qdisc_priv(sch);
     42	struct Qdisc_class_common *clc;
     43
     44	clc = qdisc_class_find(&q->clhash, classid);
     45	if (clc == NULL)
     46		return NULL;
     47	return container_of(clc, struct drr_class, common);
     48}
     49
/* Netlink attribute policy: the per-class quantum is a plain u32 (bytes). */
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
     53
/* Create a new class or modify an existing one (tc class add/change).
 * @arg carries the existing class (or 0 when creating).  The quantum
 * defaults to the device MTU when TCA_DRR_QUANTUM is absent; zero is
 * rejected.  Returns 0 on success or a negative errno.
 */
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));	/* default: one MTU per round */

	if (cl != NULL) {
		/* Existing class: optionally swap the rate estimator, then
		 * update the quantum under the qdisc tree lock.
		 */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL, true,
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	/* New class: allocate and set up an inner pfifo qdisc. */
	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid,
					       NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;	/* fall back to a drop-all qdisc */
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL, true, tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			/* undo the allocations above */
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
    139
/* Free a class: kill its estimator, release the inner qdisc, free memory.
 * Caller must have already unlinked the class from the class hash.
 */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}
    146
/* Delete a class (tc class del).  Refused with -EBUSY while tc filters
 * still reference the class.  The queue purge and hash removal happen
 * under the tree lock; the final free happens outside it.
 */
static int drr_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	drr_destroy_class(sch, cl);
	return 0;
}
    166
    167static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
    168{
    169	return (unsigned long)drr_find_class(sch, classid);
    170}
    171
    172static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
    173				       struct netlink_ext_ack *extack)
    174{
    175	struct drr_sched *q = qdisc_priv(sch);
    176
    177	if (cl) {
    178		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
    179		return NULL;
    180	}
    181
    182	return q->block;
    183}
    184
    185static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
    186				  u32 classid)
    187{
    188	struct drr_class *cl = drr_find_class(sch, classid);
    189
    190	if (cl != NULL)
    191		cl->filter_cnt++;
    192
    193	return (unsigned long)cl;
    194}
    195
    196static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
    197{
    198	struct drr_class *cl = (struct drr_class *)arg;
    199
    200	cl->filter_cnt--;
    201}
    202
    203static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
    204			   struct Qdisc *new, struct Qdisc **old,
    205			   struct netlink_ext_ack *extack)
    206{
    207	struct drr_class *cl = (struct drr_class *)arg;
    208
    209	if (new == NULL) {
    210		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
    211					cl->common.classid, NULL);
    212		if (new == NULL)
    213			new = &noop_qdisc;
    214	}
    215
    216	*old = qdisc_replace(sch, new, &cl->qdisc);
    217	return 0;
    218}
    219
    220static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
    221{
    222	struct drr_class *cl = (struct drr_class *)arg;
    223
    224	return cl->qdisc;
    225}
    226
    227static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
    228{
    229	struct drr_class *cl = (struct drr_class *)arg;
    230
    231	list_del(&cl->alist);
    232}
    233
/* Dump one class to netlink (tc class show): fills the tcmsg header and
 * a nested TCA_OPTIONS attribute carrying the quantum.  On any failure
 * the partially written nest is cancelled (nla_nest_cancel tolerates a
 * NULL nest from a failed nla_nest_start_noflag).
 */
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
    255
/* Dump per-class statistics: basic counters, rate estimator, queue
 * stats of the inner qdisc, plus the DRR-specific deficit (reported
 * only while the class is backlogged).  Returns -1 on copy failure.
 */
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
    275
    276static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
    277{
    278	struct drr_sched *q = qdisc_priv(sch);
    279	struct drr_class *cl;
    280	unsigned int i;
    281
    282	if (arg->stop)
    283		return;
    284
    285	for (i = 0; i < q->clhash.hashsize; i++) {
    286		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
    287			if (arg->count < arg->skip) {
    288				arg->count++;
    289				continue;
    290			}
    291			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
    292				arg->stop = 1;
    293				return;
    294			}
    295			arg->count++;
    296		}
    297	}
    298}
    299
/* Map an skb to a DRR class.  First try a direct match of skb->priority
 * against this qdisc's handle; otherwise run the attached tc filters
 * (under RCU-bh).  Filter verdicts QUEUED/STOLEN/TRAP mark *qerr as
 * "stolen"; those and SHOT return NULL, as does a failed lookup.
 */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	/* skb->priority names a class of this qdisc directly? */
	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
    337
/* Enqueue @skb into its class's inner qdisc.  When the class transitions
 * from empty to backlogged it joins the active list with a fresh deficit
 * of one quantum.  Unclassifiable or rejected packets are dropped and
 * accounted.
 */
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	/* Sample emptiness before the enqueue can change it. */
	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
    374
/* Deficit Round Robin dequeue.  Serve the class at the head of the
 * active list while its deficit covers the head packet; otherwise
 * credit it one quantum and rotate it to the tail.  A class whose
 * inner qdisc drains leaves the active list.
 */
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			/* inner qdisc is non-work-conserving: warn, give up */
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* Head packet too big for the remaining deficit: new round. */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
    414
    415static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
    416			  struct netlink_ext_ack *extack)
    417{
    418	struct drr_sched *q = qdisc_priv(sch);
    419	int err;
    420
    421	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
    422	if (err)
    423		return err;
    424	err = qdisc_class_hash_init(&q->clhash);
    425	if (err < 0)
    426		return err;
    427	INIT_LIST_HEAD(&q->active);
    428	return 0;
    429}
    430
/* Reset: unlink every backlogged class from the active list, reset each
 * inner qdisc, and clear this qdisc's aggregate counters.
 */
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			/* only backlogged classes sit on the active list */
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
    447
/* Teardown: release the tcf block, destroy every class (using the
 * _safe iterator because drr_destroy_class frees entries), then the
 * class hash itself.
 */
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
    464
/* Class-level operations exposed to the tc core. */
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
    479
/* Qdisc operations registered under the "drr" id. */
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
    492
/* Module entry: register the "drr" qdisc with the tc core. */
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

/* Module exit: unregister the qdisc. */
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");