/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
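
/* Editorial note: an illustrative example of how this qdisc is typically
 * exercised from user space via tc(8). The interface name and parameter
 * values are hypothetical, chosen only to show the features described
 * above (delay/jitter with correlation, loss, duplication, corruption,
 * and rate shaping):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *	tc qdisc change dev eth0 root netem duplicate 1% corrupt 0.1%
 *	tc qdisc change dev eth0 root netem rate 5mbit
 */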

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
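
/* Editorial note: the mixing step above can be read, in 32-bit fixed
 * point, as
 *
 *	answer = ((2^32 - rho) * value + rho * last) / 2^32
 *
 * i.e. a weighted average of fresh entropy and the previous output, so
 * successive samples are correlated with coefficient roughly rho / 2^32.
 */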

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *	TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *	LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *	LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *	TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
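
/* Editorial note: the a1..a5 thresholds hold the transition probabilities
 * p13, p31, p32, p14 and p23 scaled to the full u32 range, so a test such
 * as "rnd < clg->a4" fires with probability p14 / 2^32. See the field
 * comments in struct netem_sched_data above for the parameter mapping.
 */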

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
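
/* Editorial note: with the field mapping above (a1 = p, a2 = r, a3 = h,
 * a4 = 1-k), a packet is lost with probability 1-k while in GOOD_STATE
 * and with probability 1-h while in BAD_STATE, matching the classical
 * Gilbert-Elliot parametrization.
 */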

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma. Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
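
/* Editorial note: for a table sample t the return value approximates
 *
 *	mu + (sigma * t) / NETEM_DIST_SCALE
 *
 * with the multiplication split into quotient and (rounded) remainder
 * parts of sigma so the intermediate products stay in range.
 */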

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
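
/* Editorial note: a worked example of the calculation above, using
 * hypothetical values: with rate = 1,000,000 bytes/s, cell_size = 48 and
 * cell_overhead = 5 (ATM-like framing), a 100-byte packet occupies
 * ceil(100/48) = 3 cells, i.e. 3 * (48 + 5) = 159 bytes on the wire,
 * giving 159 * 10^9 / 10^6 = 159,000 ns of transmission time.
 */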

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
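
/* Editorial note: because equal time_to_send keys descend to the right,
 * packets that share a timestamp keep their arrival (FIFO) order when
 * the tree is walked with rb_first() in netem_dequeue().
 */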

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we segment it instead,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
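
/* Editorial note: in the finish_segs path above, nb counts segments that
 * were enqueued in place of the single GSO skb the parent accounted for,
 * so qdisc_tree_reduce_backlog() is called with a negative packet delta
 * (1 - nb) to increase the parent's counts by the extra segments.
 */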

/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
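
/* Editorial note: the expression above picks the next slot boundary
 * uniformly in [min_delay, max_delay] past "now":
 *
 *	slot_next = now + min_delay
 *		    + rand32 * (max_delay - min_delay) / 2^32
 */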

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* is there more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
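
/* Editorial note: the distribution tables consumed here are normally
 * generated by iproute2 (the normal, pareto and paretonormal tables
 * shipped with tc), each entry being a signed 16-bit sample of the
 * desired distribution in units of NETEM_DIST_SCALE, as interpreted
 * by tabledist() above.
 */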

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
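
/* Editorial note: reciprocal_value() precomputes the multiplicative
 * inverse used by reciprocal_divide() in packet_time_ns(), replacing a
 * per-packet integer division by cell_size with a multiply and shift.
 */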

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");