cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sch_hfsc.c (40431B)


/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
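/*
 * For illustration only (not part of the kernel build): the curves
 * handled below are usually configured from userspace with the
 * iproute2 tc(8) hfsc syntax; device name and rates here are made-up
 * example values:
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 2mbit d 10ms m2 1mbit \
 *		ls m2 5mbit ul m2 10mbit
 *
 * "rt" maps to TCA_HFSC_RSC (real-time), "ls" to TCA_HFSC_FSC
 * (link-sharing) and "ul" to TCA_HFSC_USC (upper limit), each a
 * two-segment service curve: slope m1 for d time units, then m2.
 */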

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */
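/*
 * Explanatory note (not referenced by the code): keeping both sm
 * (bytes/tick << SM_SHIFT) and ism (ticks/byte << ISM_SHIFT) means
 * that both projections reduce to a multiply plus shifts:
 *
 *	y = x * sm  >> SM_SHIFT		(time -> bytes, seg_x2y() below)
 *	x = y * ism >> ISM_SHIFT	(bytes -> time, seg_y2x() below)
 *
 * so the expensive do_div() calls are confined to curve setup
 * (m2sm(), m2ism(), d2dx()) and dump (dx2d()); the per-packet fast
 * path never divides.
 */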

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list; /* filter list */
	struct tcf_block *block;
	unsigned int	filter_cnt;	/* filter count */
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_tree member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8		cl_flags;	/* which curves are valid */
	u32		cl_vtperiod;	/* vt period sequence number */
	u32		cl_parentperiod;/* parent's vt period sequence number */
	u32		cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
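/*
 * Worked example (explanatory only, values made up): with eligible
 * classes A (cl_e = 10, cl_d = 30), B (cl_e = 15, cl_d = 20) and
 * C (cl_e = 18) at cur_time = 16, eltree_get_mindl() below scans in
 * eligible-time order, stops at C (not yet eligible), and returns B,
 * whose deadline is the smaller of the two eligible ones.
 */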

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
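/*
 * Worked example (explanatory only): with PSCHED_SHIFT 10,
 * PSCHED_TICKS_PER_SEC is 10^9 >> 10 = 976562, i.e. one tick is
 * 1.024us. For a 1 Mbit/s curve, m = 125000 bytes/s, and m2sm()
 * below computes (rounding up)
 *
 *	sm = ((125000 << 20) + 976561) / 976562 = 134218
 *
 * which, divided back by 2^20, is ~0.128 bytes per tick: the 1Mbps
 * column of the table above with its four effective digits kept.
 */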
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
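/*
 * Continuing the 1 Mbit/s example above (explanatory only): for one
 * second worth of ticks, x = 976562 < 2^20, so the high half is zero
 * and seg_x2y(976562, 134218) = (976562 * 134218) >> 20 = 125000
 * bytes, i.e. 1 Mbit/s as expected; for larger x the split keeps the
 * 64-bit multiplication from overflowing.
 */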

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve
 * corresponding to the given y-projection (amount of work)
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 * solving for dx gives dx = (y1 - y) / (sm1 - sm2); since the
	 * slopes carry a 2^SM_SHIFT scale factor, (y1 - y) is shifted
	 * left by SM_SHIFT before the division below.
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * initialize cl_vt to the highest value seen
				 * among the siblings. this is analogous to
				 * what cur_time would provide in the real-time
				 * case.
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_HFSC_MAX, opt, hfsc_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		int old_flags;

		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		old_flags = cl->cl_flags;

		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			int len = qdisc_peek_len(cl->qdisc);

			if (cl->cl_flags & HFSC_RSC) {
				if (old_flags & HFSC_RSC)
					update_ed(cl, len);
				else
					init_ed(cl, len);
			}

			if (cl->cl_flags & HFSC_FSC) {
				if (old_flags & HFSC_FSC)
					update_vf(cl, 0, cur_time);
				else
					init_vf(cl, len);
			}
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL, true, tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		qdisc_purge_queue(parent->qdisc);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_put(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	sch_tree_unlock(sch);

	hfsc_destroy_class(sch, cl);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
	update_vf(cl, 0, 0);
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);
}

static unsigned long
hfsc_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)hfsc_find_class(classid, sch);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
					struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;
	__u32 qlen;

	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	if (next_time)
		qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
		struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
	if (err)
		return err;

	gnet_stats_basic_sync_init(&q->root.bstats);
	q->root.cl_common.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle, NULL);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct hfsc_class *cl;
	int err;
	bool first;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		if (cl->cl_flags & HFSC_RSC)
			init_ed(cl, len);
		if (cl->cl_flags & HFSC_FSC)
			init_vf(cl, len);
		/*
		 * If this is the first packet, isolate the head so an eventual
		 * head drop before the first dequeue operation has no chance
		 * to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);

	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->cl_flags & HFSC_RSC) {
		if (cl->qdisc->q.qlen != 0) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		} else {
			/* the class becomes passive */
			eltree_remove(cl);
		}
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.find		= hfsc_search_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);