/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
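
/* Example usage (illustrative; configured with iproute2's tc, whose exact
 * option syntax can vary between versions -- see tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *		delays packets by 100ms +/- 10ms of jitter, with 25%
 *		correlation between successive delay samples (implemented
 *		by get_crandom() below).
 *
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *		drops roughly 0.3% of packets, again with correlated
 *		randomness.
 */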

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return rb_entry(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

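/* Worked example (illustrative): in fixed point, get_crandom() computes
 *
 *	answer = (1 - rho) * U + rho * last,	rho = state->rho / 2^32
 *
 * where U is a fresh pseudo-random value. With state->rho = 0x80000000
 * (rho ~ 0.5) each output is roughly the average of a new random value
 * and the previous output, giving positively correlated samples without
 * any floating point arithmetic.
 */
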
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

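/* The a1..a5 fields hold p13, p31, p32, p14, p23 respectively (see
 * get_loss_clg() below). A hedged iproute2 example selecting this model,
 * probabilities given as percentages (consult tc-netem(8) for the exact
 * argument order in your version):
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 10% 0.1%
 */
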
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

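/* Kernel-side semantics, readable directly from the switch above
 * (illustrative summary, not normative): in GOOD_STATE a packet is lost
 * with probability a4/2^32 and the chain moves to BAD_STATE with
 * probability a1/2^32 (a1 = p); in BAD_STATE a packet survives with
 * probability a3/2^32 (a3 = h) and the chain recovers with probability
 * a2/2^32 (a2 = r). Setting a4 = 0 and a3 = 0 degenerates to the Simple
 * Gilbert model, where every packet sent in the bad state is lost.
 */
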
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * a true return value from the Markov 4-state loss generator
		 * means the packet should be dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * a true return value from the Gilbert-Elliot loss generator
		 * means the packet should be dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

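/* Worked example (illustrative): NETEM_DIST_SCALE is 8192, so a table
 * entry t = 4096 with sigma equal to 10ms worth of ticks makes tabledist()
 * above return roughly mu + sigma * t / NETEM_DIST_SCALE = mu + 5ms.
 * Splitting the multiply into (sigma % NETEM_DIST_SCALE) and
 * (sigma / NETEM_DIST_SCALE) parts keeps the intermediate products from
 * overflowing when sigma is large.
 */
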
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

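/* Worked example (illustrative): q->rate is in bytes per second, so a
 * 1500 byte packet at rate 125000 (1 Mbit/s) costs
 * 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12ms of transmission
 * time, converted to scheduler ticks by PSCHED_NS2TICKS(). The
 * packet_overhead, cell_size and cell_overhead knobs let the calculation
 * account for link-layer framing such as ATM cells.
 */
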
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we segment it instead, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = netem_rb_to_skb(rb_last(&q->t_root));
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

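/* Reordering example (illustrative; see tc-netem(8) for exact syntax):
 *
 *	tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%
 *
 * With a gap of 1 (iproute2's usual default when reorder is requested),
 * every packet is a reorder candidate: it is sent immediately via
 * netem_enqueue_skb_head() with (correlated) probability 25%, while the
 * rest take the delayed tfifo path above, so the immediate packets
 * overtake the delayed ones and arrive out of order.
 */
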
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is the earliest packet due yet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

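/* The distribution table is generated offline, e.g. with the maketable
 * tool shipped in the iproute2 sources, and installed as files such as
 * normal.dist or pareto.dist. It is then selected at configuration time
 * (illustrative):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */
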
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");