cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sch_multiq.c (9013B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct Qdisc **queues;
};


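/* Select the child qdisc for skb.  Attached tc filters are run for their
 * actions only; the band itself is taken from the skb's queue mapping,
 * falling back to band 0 when that mapping is out of range.
 */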
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		fallthrough;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

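/* Enqueue skb on the child qdisc chosen by multiq_classify() and account
 * drops against the parent qdisc.
 */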
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

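/* Round-robin dequeue across bands; q->curband remembers where the last
 * dequeue left off so every band gets a turn.
 */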
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

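/* Like multiq_dequeue(), but works on a local copy of the band pointer
 * and leaves the packet in place, so peeking has no side effects.
 */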
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}

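/* Reset every child qdisc and restart the round-robin at band 0. */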
static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

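/* Release the filter block, drop all child qdiscs and free the band array. */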
static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	for (band = 0; band < q->bands; band++)
		qdisc_put(q->queues[band]);

	kfree(q->queues);
}

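/* Configuration handler (init and change).  The band count is forced to
 * the device's real_num_tx_queues: children of bands that no longer exist
 * are purged and released, and any newly active band still pointing at
 * noop_qdisc gets a default pfifo child.
 */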
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	struct Qdisc **removed;
	int i, n_removed = 0;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
			  GFP_KERNEL);
	if (!removed)
		return -ENOMEM;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			q->queues[i] = &noop_qdisc;
			qdisc_purge_queue(child);
			removed[n_removed++] = child;
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < n_removed; i++)
		qdisc_put(removed[i]);
	kfree(removed);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1), extack);
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc)
					qdisc_purge_queue(old);
				sch_tree_unlock(sch);
				qdisc_put(old);
			}
		}
	}
	return 0;
}

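/* Set up the filter block and a band array sized to num_tx_queues, with
 * every slot initialised to noop_qdisc, then let multiq_tune() apply the
 * requested configuration.
 */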
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}

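/* Report the current and maximum band count as TCA_OPTIONS. */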
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

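/* Class operations: each band is exposed as a class with minor id band + 1,
 * so userspace can graft child qdiscs, dump per-band statistics and walk
 * the bands.
 */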
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_find(sch, classid);
}


static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.find		=	multiq_find,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_unbind,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");