cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sch_mqprio.c (16419B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * net/sched/sch_mqprio.c
      4 *
      5 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
      6 */
      7
      8#include <linux/types.h>
      9#include <linux/slab.h>
     10#include <linux/kernel.h>
     11#include <linux/string.h>
     12#include <linux/errno.h>
     13#include <linux/skbuff.h>
     14#include <linux/module.h>
     15#include <net/netlink.h>
     16#include <net/pkt_sched.h>
     17#include <net/sch_generic.h>
     18#include <net/pkt_cls.h>
     19
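/* Private data of the mqprio root qdisc: the temporary array of
 * pre-allocated per-queue child qdiscs plus the offload parameters
 * (mode, shaper, per-TC rates) handed to the driver when qopt->hw is set.
 */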
     20struct mqprio_sched {
     21	struct Qdisc		**qdiscs;
     22	u16 mode;
     23	u16 shaper;
     24	int hw_offload;
     25	u32 flags;
     26	u64 min_rate[TC_QOPT_MAX_QUEUE];
     27	u64 max_rate[TC_QOPT_MAX_QUEUE];
     28};
     29
     30static void mqprio_destroy(struct Qdisc *sch)
     31{
     32	struct net_device *dev = qdisc_dev(sch);
     33	struct mqprio_sched *priv = qdisc_priv(sch);
     34	unsigned int ntx;
     35
     36	if (priv->qdiscs) {
     37		for (ntx = 0;
     38		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
     39		     ntx++)
     40			qdisc_put(priv->qdiscs[ntx]);
     41		kfree(priv->qdiscs);
     42	}
     43
     44	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
     45		struct tc_mqprio_qopt_offload mqprio = { { 0 } };
     46
     47		switch (priv->mode) {
     48		case TC_MQPRIO_MODE_DCB:
     49		case TC_MQPRIO_MODE_CHANNEL:
     50			dev->netdev_ops->ndo_setup_tc(dev,
     51						      TC_SETUP_QDISC_MQPRIO,
     52						      &mqprio);
     53			break;
     54		default:
     55			return;
     56		}
     57	} else {
     58		netdev_set_num_tc(dev, 0);
     59	}
     60}
     61
     62static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
     63{
     64	int i, j;
     65
      66	/* Verify num_tc does not exceed the maximum number of traffic classes */
     67	if (qopt->num_tc > TC_MAX_QUEUE)
     68		return -EINVAL;
     69
     70	/* Verify priority mapping uses valid tcs */
     71	for (i = 0; i < TC_BITMASK + 1; i++) {
     72		if (qopt->prio_tc_map[i] >= qopt->num_tc)
     73			return -EINVAL;
     74	}
     75
      76	/* Limit qopt->hw to the maximum supported offload value.  Drivers
      77	 * have the option of overriding this later if they don't support a
      78	 * given offload type.
      79	 */
     80	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
     81		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
     82
     83	/* If hardware offload is requested we will leave it to the device
     84	 * to either populate the queue counts itself or to validate the
     85	 * provided queue counts.  If ndo_setup_tc is not present then
     86	 * hardware doesn't support offload and we should return an error.
     87	 */
     88	if (qopt->hw)
     89		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;
     90
     91	for (i = 0; i < qopt->num_tc; i++) {
     92		unsigned int last = qopt->offset[i] + qopt->count[i];
     93
      94		/* Verify the queue range lies within the tx range; a value of
      95		 * "last" equal to real_num_tx_queues means the last queue is
      96		 * in use.
      97		 */
     97		if (qopt->offset[i] >= dev->real_num_tx_queues ||
     98		    !qopt->count[i] ||
     99		    last > dev->real_num_tx_queues)
    100			return -EINVAL;
    101
    102		/* Verify that the offset and counts do not overlap */
    103		for (j = i + 1; j < qopt->num_tc; j++) {
    104			if (last > qopt->offset[j])
    105				return -EINVAL;
    106		}
    107	}
    108
    109	return 0;
    110}
    111
    112static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
    113	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
    114	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
    115	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
    116	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
    117};
    118
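/* Parse the TCA_MQPRIO_* attributes that follow the legacy
 * struct tc_mqprio_qopt payload inside TCA_OPTIONS; if nothing
 * follows the qopt, tb[] is simply zeroed.
 */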
    119static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
    120		      const struct nla_policy *policy, int len)
    121{
    122	int nested_len = nla_len(nla) - NLA_ALIGN(len);
    123
    124	if (nested_len >= nla_attr_size(0))
    125		return nla_parse_deprecated(tb, maxtype,
    126					    nla_data(nla) + NLA_ALIGN(len),
    127					    nested_len, policy, NULL);
    128
    129	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
    130	return 0;
    131}
    132
    133static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
    134		       struct netlink_ext_ack *extack)
    135{
    136	struct net_device *dev = qdisc_dev(sch);
    137	struct mqprio_sched *priv = qdisc_priv(sch);
    138	struct netdev_queue *dev_queue;
    139	struct Qdisc *qdisc;
    140	int i, err = -EOPNOTSUPP;
    141	struct tc_mqprio_qopt *qopt = NULL;
    142	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
    143	struct nlattr *attr;
    144	int rem;
    145	int len;
    146
    147	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
    148	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
    149
    150	if (sch->parent != TC_H_ROOT)
    151		return -EOPNOTSUPP;
    152
    153	if (!netif_is_multiqueue(dev))
    154		return -EOPNOTSUPP;
    155
     156	/* Make certain we can allocate enough classids to handle all queues */
    157	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
    158		return -ENOMEM;
    159
    160	if (!opt || nla_len(opt) < sizeof(*qopt))
    161		return -EINVAL;
    162
    163	qopt = nla_data(opt);
    164	if (mqprio_parse_opt(dev, qopt))
    165		return -EINVAL;
    166
    167	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
    168	if (len > 0) {
    169		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
    170				 sizeof(*qopt));
    171		if (err < 0)
    172			return err;
    173
    174		if (!qopt->hw)
    175			return -EINVAL;
    176
    177		if (tb[TCA_MQPRIO_MODE]) {
    178			priv->flags |= TC_MQPRIO_F_MODE;
    179			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
    180		}
    181
    182		if (tb[TCA_MQPRIO_SHAPER]) {
    183			priv->flags |= TC_MQPRIO_F_SHAPER;
    184			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
    185		}
    186
    187		if (tb[TCA_MQPRIO_MIN_RATE64]) {
    188			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
    189				return -EINVAL;
    190			i = 0;
    191			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
    192					    rem) {
    193				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
    194					return -EINVAL;
    195				if (i >= qopt->num_tc)
    196					break;
    197				priv->min_rate[i] = *(u64 *)nla_data(attr);
    198				i++;
    199			}
    200			priv->flags |= TC_MQPRIO_F_MIN_RATE;
    201		}
    202
    203		if (tb[TCA_MQPRIO_MAX_RATE64]) {
    204			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
    205				return -EINVAL;
    206			i = 0;
    207			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
    208					    rem) {
    209				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
    210					return -EINVAL;
    211				if (i >= qopt->num_tc)
    212					break;
    213				priv->max_rate[i] = *(u64 *)nla_data(attr);
    214				i++;
    215			}
    216			priv->flags |= TC_MQPRIO_F_MAX_RATE;
    217		}
    218	}
    219
     220	/* Pre-allocate the per-queue qdiscs; attachment later cannot fail */
    221	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
    222			       GFP_KERNEL);
    223	if (!priv->qdiscs)
    224		return -ENOMEM;
    225
    226	for (i = 0; i < dev->num_tx_queues; i++) {
    227		dev_queue = netdev_get_tx_queue(dev, i);
    228		qdisc = qdisc_create_dflt(dev_queue,
    229					  get_default_qdisc_ops(dev, i),
    230					  TC_H_MAKE(TC_H_MAJ(sch->handle),
    231						    TC_H_MIN(i + 1)), extack);
    232		if (!qdisc)
    233			return -ENOMEM;
    234
    235		priv->qdiscs[i] = qdisc;
    236		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
    237	}
    238
     239	/* If the mqprio options indicate that hardware should own the
     240	 * queue mapping, run ndo_setup_tc; otherwise use the supplied
     241	 * and verified mapping.
     242	 */
    243	if (qopt->hw) {
    244		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
    245
    246		switch (priv->mode) {
    247		case TC_MQPRIO_MODE_DCB:
    248			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
    249				return -EINVAL;
    250			break;
    251		case TC_MQPRIO_MODE_CHANNEL:
    252			mqprio.flags = priv->flags;
    253			if (priv->flags & TC_MQPRIO_F_MODE)
    254				mqprio.mode = priv->mode;
    255			if (priv->flags & TC_MQPRIO_F_SHAPER)
    256				mqprio.shaper = priv->shaper;
    257			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
    258				for (i = 0; i < mqprio.qopt.num_tc; i++)
    259					mqprio.min_rate[i] = priv->min_rate[i];
    260			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
    261				for (i = 0; i < mqprio.qopt.num_tc; i++)
    262					mqprio.max_rate[i] = priv->max_rate[i];
    263			break;
    264		default:
    265			return -EINVAL;
    266		}
    267		err = dev->netdev_ops->ndo_setup_tc(dev,
    268						    TC_SETUP_QDISC_MQPRIO,
    269						    &mqprio);
    270		if (err)
    271			return err;
    272
    273		priv->hw_offload = mqprio.qopt.hw;
    274	} else {
    275		netdev_set_num_tc(dev, qopt->num_tc);
    276		for (i = 0; i < qopt->num_tc; i++)
    277			netdev_set_tc_queue(dev, i,
    278					    qopt->count[i], qopt->offset[i]);
    279	}
    280
    281	/* Always use supplied priority mappings */
    282	for (i = 0; i < TC_BITMASK + 1; i++)
    283		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
    284
    285	sch->flags |= TCQ_F_MQROOT;
    286	return 0;
    287}
    288
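/* Graft the pre-allocated child qdiscs onto their tx queues. Once
 * attached they are owned by the queues, so the temporary array in
 * priv->qdiscs can be freed.
 */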
    289static void mqprio_attach(struct Qdisc *sch)
    290{
    291	struct net_device *dev = qdisc_dev(sch);
    292	struct mqprio_sched *priv = qdisc_priv(sch);
    293	struct Qdisc *qdisc, *old;
    294	unsigned int ntx;
    295
     296	/* Attach the underlying qdiscs */
    297	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
    298		qdisc = priv->qdiscs[ntx];
    299		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
    300		if (old)
    301			qdisc_put(old);
    302		if (ntx < dev->real_num_tx_queues)
    303			qdisc_hash_add(qdisc, false);
    304	}
    305	kfree(priv->qdiscs);
    306	priv->qdiscs = NULL;
    307}
    308
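/* Map a per-queue class id (1..num_tx_queues) to its tx queue;
 * class ids are offset by one so that 0 can be used for "not found".
 */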
    309static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
    310					     unsigned long cl)
    311{
    312	struct net_device *dev = qdisc_dev(sch);
    313	unsigned long ntx = cl - 1;
    314
    315	if (ntx >= dev->num_tx_queues)
    316		return NULL;
    317	return netdev_get_tx_queue(dev, ntx);
    318}
    319
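/* Replace the child qdisc of one tx queue. The device is deactivated
 * around the graft so the tx path is quiescent while the qdisc is
 * swapped.
 */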
    320static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
    321			struct Qdisc **old, struct netlink_ext_ack *extack)
    322{
    323	struct net_device *dev = qdisc_dev(sch);
    324	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
    325
    326	if (!dev_queue)
    327		return -EINVAL;
    328
    329	if (dev->flags & IFF_UP)
    330		dev_deactivate(dev);
    331
    332	*old = dev_graft_qdisc(dev_queue, new);
    333
    334	if (new)
    335		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
    336
    337	if (dev->flags & IFF_UP)
    338		dev_activate(dev);
    339
    340	return 0;
    341}
    342
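/* Emit the per-traffic-class minimum and maximum rates as nested
 * TCA_MQPRIO_MIN_RATE64 / TCA_MQPRIO_MAX_RATE64 attributes.
 */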
    343static int dump_rates(struct mqprio_sched *priv,
    344		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
    345{
    346	struct nlattr *nest;
    347	int i;
    348
    349	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
    350		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
    351		if (!nest)
    352			goto nla_put_failure;
    353
    354		for (i = 0; i < opt->num_tc; i++) {
    355			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
    356				    sizeof(priv->min_rate[i]),
    357				    &priv->min_rate[i]))
    358				goto nla_put_failure;
    359		}
    360		nla_nest_end(skb, nest);
    361	}
    362
    363	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
    364		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
    365		if (!nest)
    366			goto nla_put_failure;
    367
    368		for (i = 0; i < opt->num_tc; i++) {
    369			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
    370				    sizeof(priv->max_rate[i]),
    371				    &priv->max_rate[i]))
    372				goto nla_put_failure;
    373		}
    374		nla_nest_end(skb, nest);
    375	}
    376	return 0;
    377
    378nla_put_failure:
    379	nla_nest_cancel(skb, nest);
    380	return -1;
    381}
    382
    383static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
    384{
    385	struct net_device *dev = qdisc_dev(sch);
    386	struct mqprio_sched *priv = qdisc_priv(sch);
    387	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
    388	struct tc_mqprio_qopt opt = { 0 };
    389	struct Qdisc *qdisc;
    390	unsigned int ntx, tc;
    391
    392	sch->q.qlen = 0;
    393	gnet_stats_basic_sync_init(&sch->bstats);
    394	memset(&sch->qstats, 0, sizeof(sch->qstats));
    395
     396	/* MQ supports lockless qdiscs. However, statistics accounting needs
     397	 * to account for all, none, or a mix of locked and unlocked child
     398	 * qdiscs. Per-cpu stats are added to the counters in-band, and the
     399	 * totals of lock-protected qdiscs are added at the end.
     400	 */
    401	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
    402		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
    403		spin_lock_bh(qdisc_lock(qdisc));
    404
    405		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
    406				     &qdisc->bstats, false);
    407		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
    408				     &qdisc->qstats);
    409		sch->q.qlen += qdisc_qlen(qdisc);
    410
    411		spin_unlock_bh(qdisc_lock(qdisc));
    412	}
    413
    414	opt.num_tc = netdev_get_num_tc(dev);
    415	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
    416	opt.hw = priv->hw_offload;
    417
    418	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
    419		opt.count[tc] = dev->tc_to_txq[tc].count;
    420		opt.offset[tc] = dev->tc_to_txq[tc].offset;
    421	}
    422
    423	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
    424		goto nla_put_failure;
    425
    426	if ((priv->flags & TC_MQPRIO_F_MODE) &&
    427	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
    428		goto nla_put_failure;
    429
    430	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
    431	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
    432		goto nla_put_failure;
    433
    434	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
    435	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
    436	    (dump_rates(priv, &opt, skb) != 0))
    437		goto nla_put_failure;
    438
    439	return nla_nest_end(skb, nla);
    440nla_put_failure:
    441	nlmsg_trim(skb, nla);
    442	return -1;
    443}
    444
    445static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
    446{
    447	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
    448
    449	if (!dev_queue)
    450		return NULL;
    451
    452	return dev_queue->qdisc_sleeping;
    453}
    454
    455static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
    456{
    457	struct net_device *dev = qdisc_dev(sch);
    458	unsigned int ntx = TC_H_MIN(classid);
    459
    460	/* There are essentially two regions here that have valid classid
    461	 * values. The first region will have a classid value of 1 through
    462	 * num_tx_queues. All of these are backed by actual Qdiscs.
    463	 */
    464	if (ntx < TC_H_MIN_PRIORITY)
    465		return (ntx <= dev->num_tx_queues) ? ntx : 0;
    466
    467	/* The second region represents the hardware traffic classes. These
    468	 * are represented by classid values of TC_H_MIN_PRIORITY through
    469	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
    470	 */
    471	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
    472}
    473
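/* Two kinds of classes are reported: per-queue classes (cl below
 * TC_H_MIN_PRIORITY) are parented to their traffic class, while the
 * traffic-class classes themselves are parented to the root.
 */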
    474static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
    475			 struct sk_buff *skb, struct tcmsg *tcm)
    476{
    477	if (cl < TC_H_MIN_PRIORITY) {
    478		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
    479		struct net_device *dev = qdisc_dev(sch);
    480		int tc = netdev_txq_to_tc(dev, cl - 1);
    481
    482		tcm->tcm_parent = (tc < 0) ? 0 :
    483			TC_H_MAKE(TC_H_MAJ(sch->handle),
    484				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
    485		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
    486	} else {
    487		tcm->tcm_parent = TC_H_ROOT;
    488		tcm->tcm_info = 0;
    489	}
    490	tcm->tcm_handle |= TC_H_MIN(cl);
    491	return 0;
    492}
    493
    494static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
    495				   struct gnet_dump *d)
    496	__releases(d->lock)
    497	__acquires(d->lock)
    498{
    499	if (cl >= TC_H_MIN_PRIORITY) {
    500		int i;
    501		__u32 qlen;
    502		struct gnet_stats_queue qstats = {0};
    503		struct gnet_stats_basic_sync bstats;
    504		struct net_device *dev = qdisc_dev(sch);
    505		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
    506
    507		gnet_stats_basic_sync_init(&bstats);
     508		/* Drop the lock here; it will be reacquired before touching
     509		 * the statistics.  This is required because the d->lock we
     510		 * hold here is the lock on dev_queue->qdisc_sleeping, which
     511		 * is also acquired below.
     512		 */
    513		if (d->lock)
    514			spin_unlock_bh(d->lock);
    515
    516		for (i = tc.offset; i < tc.offset + tc.count; i++) {
    517			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
    518			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
    519
    520			spin_lock_bh(qdisc_lock(qdisc));
    521
    522			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
    523					     &qdisc->bstats, false);
    524			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
    525					     &qdisc->qstats);
    526			sch->q.qlen += qdisc_qlen(qdisc);
    527
    528			spin_unlock_bh(qdisc_lock(qdisc));
    529		}
    530		qlen = qdisc_qlen(sch) + qstats.qlen;
    531
    532		/* Reclaim root sleeping lock before completing stats */
    533		if (d->lock)
    534			spin_lock_bh(d->lock);
    535		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
    536		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
    537			return -1;
    538	} else {
    539		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
    540
    541		sch = dev_queue->qdisc_sleeping;
    542		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
    543					  &sch->bstats, true) < 0 ||
    544		    qdisc_qstats_copy(d, sch) < 0)
    545			return -1;
    546	}
    547	return 0;
    548}
    549
    550static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
    551{
    552	struct net_device *dev = qdisc_dev(sch);
    553	unsigned long ntx;
    554
    555	if (arg->stop)
    556		return;
    557
    558	/* Walk hierarchy with a virtual class per tc */
    559	arg->count = arg->skip;
    560	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
    561		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
    562			arg->stop = 1;
    563			return;
    564		}
    565		arg->count++;
    566	}
    567
    568	/* Pad the values and skip over unused traffic classes */
    569	if (ntx < TC_MAX_QUEUE) {
    570		arg->count = TC_MAX_QUEUE;
    571		ntx = TC_MAX_QUEUE;
    572	}
    573
    574	/* Reset offset, sort out remaining per-queue qdiscs */
    575	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
    576		if (arg->fn(sch, ntx + 1, arg) < 0) {
    577			arg->stop = 1;
    578			return;
    579		}
    580		arg->count++;
    581	}
    582}
    583
    584static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
    585						struct tcmsg *tcm)
    586{
    587	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
    588}
    589
    590static const struct Qdisc_class_ops mqprio_class_ops = {
    591	.graft		= mqprio_graft,
    592	.leaf		= mqprio_leaf,
    593	.find		= mqprio_find,
    594	.walk		= mqprio_walk,
    595	.dump		= mqprio_dump_class,
    596	.dump_stats	= mqprio_dump_class_stats,
    597	.select_queue	= mqprio_select_queue,
    598};
    599
    600static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
    601	.cl_ops		= &mqprio_class_ops,
    602	.id		= "mqprio",
    603	.priv_size	= sizeof(struct mqprio_sched),
    604	.init		= mqprio_init,
    605	.destroy	= mqprio_destroy,
    606	.attach		= mqprio_attach,
    607	.change_real_num_tx = mq_change_real_num_tx,
    608	.dump		= mqprio_dump,
    609	.owner		= THIS_MODULE,
    610};
    611
    612static int __init mqprio_module_init(void)
    613{
    614	return register_qdisc(&mqprio_qdisc_ops);
    615}
    616
    617static void __exit mqprio_module_exit(void)
    618{
    619	unregister_qdisc(&mqprio_qdisc_ops);
    620}
    621
    622module_init(mqprio_module_init);
    623module_exit(mqprio_module_exit);
    624
    625MODULE_LICENSE("GPL");