/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
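/* Configuration example (illustrative, not part of the original source;
 * exact option spellings depend on the installed iproute2 version):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3% 25%
 *
 * adds 100ms of delay with +/- 10ms of jitter (25% correlated with the
 * previous delay) and 0.3% packet loss (25% correlated with the previous
 * loss decision); netem_change() below parses the resulting netlink
 * attributes.
 */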
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return container_of(rb, struct sk_buff, rbnode);
}
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
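/* Worked example (illustrative, not part of the original source): with
 * rho = 0x80000000 (about 50% correlation), the fixed-point blend
 * (value * (2^32 - rho) + last * rho) >> 32 weighs the fresh random
 * value and the previous output roughly equally, so successive samples
 * drift rather than jump; rho = 0 degenerates to plain prandom_u32().
 */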
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
		break;
	}

	return false;
}
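/* Illustrative note (not part of the original source): the a1..a4
 * thresholds are compared directly against prandom_u32(), so a
 * probability of 25% is encoded as 0x40000000. In the Gilbert-Elliot
 * mapping above, a1 = p (GOOD -> BAD transition), a2 = r (BAD -> GOOD
 * transition), a3 = h (transmission probability while BAD, hence the
 * '>' test) and a4 = 1-k (loss probability while GOOD).
 */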
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
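/* Worked example (illustrative, not part of the original source): with
 * mu = 100ms, sigma = 10ms and a table entry t = -2048, the result is
 * approximately mu + sigma * t / NETEM_DIST_SCALE = 100ms - 2.5ms,
 * since NETEM_DIST_SCALE is 8192; splitting sigma into quotient and
 * remainder merely avoids intermediate overflow while rounding to the
 * nearest scale unit.
 */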
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
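/* Worked example (illustrative, not part of the original source): q->rate
 * is in bytes per second, so at rate = 125000 (1 Mbit/s) a 1500 byte
 * packet occupies 1500 * NSEC_PER_SEC / 125000 = 12ms of link time; with
 * a nonzero cell_size (e.g. 48 for ATM-like framing) the length is first
 * rounded up to a whole number of cells plus per-cell overhead.
 */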
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the remaining
 * frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}
static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->tail)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying the packet.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (sch->q.qlen)
				last = sch->q.tail;
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
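/* Usage note (illustrative, not part of the original source): user space
 * supplies this table through the TCA_NETEM_DELAY_DIST attribute. With
 * iproute2 this happens when a named distribution is requested, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * where tc loads a precomputed inverse-CDF table (normal.dist) shipped
 * with iproute2 and passes it down as an array of signed 16-bit values
 * consumed by tabledist() above.
 */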
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}
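/* Usage note (illustrative, not part of the original source): a command
 * such as
 *
 *	tc qdisc change dev eth0 root netem rate 5mbit reorder 25% 50%
 *
 * re-enters this function with TCA_NETEM_RATE (or TCA_NETEM_RATE64 for
 * rates that do not fit in 32 bits) and TCA_NETEM_REORDER present in
 * tb[], alongside the always-present struct tc_netem_qopt header.
 */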
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");