cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sch_netem.c (32061B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	Added generation of correlated loss according to the 4-state
	Markov chain of the GI model and to the 2-state "Gilbert-Elliot"
	model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
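
/* For illustration (example invocations, not part of the original source):
 * this qdisc is normally configured from userspace with tc(8); see
 * tc-netem(8).  A few representative alternatives, assuming eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc add dev eth0 root netem loss 0.3% 25%
 *	tc qdisc add dev eth0 root netem duplicate 1% corrupt 0.1%
 *
 * The first adds 100ms of delay with +/-10ms jitter and 25% correlation
 * between consecutive delay samples; the parameters map onto the fields
 * of struct netem_sched_data below.
 */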

struct disttable {
	u32 size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
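
/* A worked example of the fixed-point blend above (illustrative numbers,
 * not from the original source): correlation values are scaled so that
 * ~2^32 means 100%.  With rho = 0x40000000 (about 25%), each output is
 * roughly
 *
 *	answer = 0.75 * value + 0.25 * last
 *
 * computed entirely in integer arithmetic: the two 64-bit products are
 * summed and shifted right by 32.  With rho == 0 the state is bypassed
 * and the stream degenerates to plain prandom_u32().
 */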

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and whether the next packet is transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
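
/* Worked example (hypothetical parameters, not from the original source):
 * probabilities are scaled to the full u32 range, so 1% is ~0x028f5c29.
 * Suppose a4 (p14) = 1%, a1 (p13) = 5%, and the chain is in
 * TX_IN_GAP_PERIOD.  A draw of rnd = 0x00800000 (< a4) produces an
 * isolated loss and a move to LOST_IN_GAP_PERIOD; rnd = 0x0a000000
 * (between a4 and a1 + a4, i.e. in the 1%..6% band) starts a burst by
 * entering LOST_IN_BURST_PERIOD; any larger draw keeps transmitting in
 * the gap period.
 */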

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between a random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
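
/* Worked example (hypothetical parameters, not from the original source):
 * with p = a1 (GOOD->BAD) and r = a2 (BAD->GOOD), the chain spends a
 * fraction p / (p + r) of its time in the bad state.  Packets are lost
 * there with probability 1 - h (h lives in a3, hence the "> a3" test) and
 * in the good state with probability 1 - k (stored in a4).  In the Simple
 * Gilbert special case (h = 0, k = 1), p = 1% and r = 10% give an average
 * loss of 1/11 (about 9%), concentrated in bursts of mean length 1/r = 10.
 */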

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * extracts a value from the 4-state Markov loss generator;
		 * a true return value means the packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extracts a value from the Gilbert-Elliot loss generator;
		 * a true return value means the packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
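
/* Worked example of the fixed-point math above (illustrative numbers, not
 * from the original source): table entries are scaled so NETEM_DIST_SCALE
 * (8192) represents one standard deviation.  With mu = 100000,
 * sigma = 10000 and a table entry t = 4096 (i.e. +0.5 sigma):
 *
 *	x = (10000 % 8192) * 4096 + 8192/2 = 7409664
 *	result = 7409664 / 8192 + (10000 / 8192) * 4096 + 100000
 *	       = 904 + 4096 + 100000 = 105000
 *
 * which is exactly mu + 0.5 * sigma.  Splitting sigma into quotient and
 * remainder keeps the intermediate products inside 64 bits even for
 * nanosecond-scale values of mu and sigma.
 */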

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
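
/* Worked example (illustrative numbers, not from the original source):
 * q->rate is in bytes per second, so a 1500-byte packet at rate = 125000
 * (1 Mbit/s) with no overheads costs 1500 * 10^9 / 125000 = 12000000 ns,
 * i.e. 12 ms of transmission time.  With ATM-like cells, cell_size = 48
 * and cell_overhead = 5, the packet needs ceil(1500 / 48) = 32 cells, so
 * the effective length becomes 32 * (48 + 5) = 1696 bytes before the
 * division.
 */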

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
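
/* An observation about the two queues above (not an original comment):
 * when jitter is low, time_to_send values are non-decreasing in arrival
 * order, so nearly every packet takes the O(1) tail-append path and the
 * rbtree stays empty.  Only a packet whose deadline sorts before the
 * current tail falls back to the O(log n) rbtree insertion.
 */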

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}
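
/* Worked example of the reordering branch above (hypothetical settings,
 * not from the original source): with gap = 5 and reorder probability 25%
 * (e.g. "tc qdisc ... netem delay 10ms reorder 25% gap 5"), the first
 * four packets after a reorder event always take the delayed tfifo path
 * because q->counter < gap - 1.  From the fifth packet on, each one is
 * queued at the head and sent immediately with probability 25% (resetting
 * the counter), overtaking the packets still being delayed.
 */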

/* Set up the next slot: a time in the future at which a fresh budget of
 * bytes and packets becomes available.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
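
/* Worked example (hypothetical settings, not from the original source):
 * slots model shared media such as Wi-Fi or DOCSIS, where delivery happens
 * in transmission opportunities.  With min_delay = 800000 ns and
 * max_delay = 1000000 ns (e.g. "tc ... netem slot 800us 1ms"), the uniform
 * branch above picks min_delay plus a 32-bit fixed-point fraction of the
 * 200us span: prandom_u32() = 0x80000000 lands halfway, at now + 900us.
 * Nothing is dequeued before slot_next; then up to max_packets/max_bytes
 * drain at once, giving bursty delivery.
 */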

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is it time to send this packet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16-bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
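
/* Usage note (illustrative, based on tc-netem(8) rather than this file):
 * the table arrives in TCA_NETEM_DELAY_DIST when userspace runs e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * iproute2 ships precomputed tables (normal, pareto, paretonormal,
 * experimental) as .dist files and sends them as an array of s16 values
 * that tabledist() scales by sigma / NETEM_DIST_SCALE around mu.
 */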

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
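
/* Worked example (hypothetical settings, not from the original source):
 * "tc qdisc add dev eth0 root netem rate 1mbit 20 100 5" yields
 * rate = 125000 bytes/s, packet_overhead = 20, cell_size = 100 and
 * cell_overhead = 5.  The reciprocal of cell_size is precomputed above so
 * that packet_time_ns() can divide by cell_size with a multiply and shift
 * (reciprocal_divide()) instead of a hardware division per packet.
 */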

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
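
/* Usage note (illustrative, based on tc-netem(8) rather than this file):
 * the nested NETEM_LOSS_GI / NETEM_LOSS_GE attributes correspond to
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 70% 0.1% 1%
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * where the "state" arguments are p13 p31 p32 p23 p14 and the "gemodel"
 * arguments are p, r, 1-h and 1-k, filled into clg.a1..a5 above.
 */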

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
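
/* Layout note (an observation about the code above, not an original
 * comment): netem's TCA_OPTIONS payload is a struct tc_netem_qopt header
 * followed by optional nested attributes,
 *
 *	| tc_netem_qopt | TCA_NETEM_CORR | TCA_NETEM_RATE | ...
 *
 * so a plain nested parse will not do; parse_attr() skips
 * NLA_ALIGN(sizeof(struct tc_netem_qopt)) bytes first.  A payload holding
 * only the header parses as "no attributes".
 */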

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and q->loss_model
	 * were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");