/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
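
/* Example usage (illustrative only; netem is driven from userspace through
 * the tc(8) front end in iproute2, see tc-netem(8) for the full grammar):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *	tc qdisc change dev eth0 root netem corrupt 0.1% duplicate 1%
 */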
struct disttable {
	u32  size;
	s16 table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
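
/* Illustration: the update above is the fixed-point blend
 *
 *	answer = value * (1 - rho') + last * rho',  where rho' = (rho + 1) / 2^32
 *
 * i.e. an exponentially weighted mix of fresh randomness with the previous
 * output, which is what produces the configured correlation. A minimal
 * standalone sketch of one step (hypothetical helper, not used by the qdisc
 * itself; `fresh` stands in for a uniform 32-bit random draw):
 */
static inline u32 crandom_step_example(u32 *last, u32 rho, u32 fresh)
{
	u64 r = (u64)rho + 1;	/* scale so rho == ~0U means full correlation */

	*last = (fresh * ((1ull << 32) - r) + (u64)*last * r) >> 32;
	return *last;
}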
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
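
/* The a1..a5 transition probabilities arrive from userspace as u32 fixed
 * point values where ~0U represents probability 1.0, so the uniform u32
 * rnd can be compared against them directly. Sketch of the encoding
 * (hypothetical helper, not part of the uapi):
 */
static inline u32 netem_prob_example(u32 percent)
{
	/* e.g. percent = 1 (1%) -> ~42949672, percent = 100 -> ~0U */
	return (u32)div_u64((u64)percent * 0xffffffffULL, 100);
}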
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
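
/* The two-part scaling above computes  mu + sigma * t / NETEM_DIST_SCALE
 * (t is a signed sample from the user-supplied table), split so the partial
 * products stay in range and rounding is symmetric around zero. A naive
 * equivalent, ignoring overflow and rounding (illustrative only, not used):
 */
static inline s64 tabledist_naive_example(s64 mu, s32 sigma, long t)
{
	return mu + (s64)sigma * t / NETEM_DIST_SCALE;
}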
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
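
/* Worked example (illustrative numbers): with q->rate = 125000 bytes/sec
 * (1 Mbit/s), no packet_overhead and no cell framing, a 1500 byte packet
 * costs 1500 * NSEC_PER_SEC / 125000 = 12000000 ns, i.e. 12 ms of
 * serialization delay.
 */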
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
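
/* Note: the rbtree keyed on time_to_send is what keeps the internal tfifo
 * sorted even when per-packet jitter makes a later arrival due out earlier;
 * ties go to the right, so packets with equal deadlines keep FIFO order.
 */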
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the
		 * front of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
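
/* Reordering example (illustrative): with gap 5 and reorder probability 25%,
 * four packets in a row take the delayed tfifo path, then the fifth becomes
 * eligible (subject to the 25% draw above) to be queued at the head with no
 * delay, overtaking the delayed ones. From userspace this corresponds to
 * something like:
 *
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% gap 5
 */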
/* Delay the next round with a new future slot with the correct number of
 * bytes and packets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
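
/* Slotting example (illustrative): slots model media that deliver packets in
 * bursts, e.g. WiFi or DOCSIS transmission opportunities. A configuration
 * along the lines of
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms packets 32
 *
 * would hold packets until the next slot, picked uniformly 800us-1ms in the
 * future, and release at most 32 of them per slot.
 */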
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* is there more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	kvfree(d);
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
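
/* The table is a pre-computed lookup mapping a uniform variate onto the
 * target distribution, with entries scaled by NETEM_DIST_SCALE; tabledist()
 * indexes it with the correlated random source. iproute2 typically ships
 * ready-made tables (normal, pareto, paretonormal) that tc loads and passes
 * down as this attribute, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */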
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
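
/* Message layout note: netem's TCA_OPTIONS is a struct tc_netem_qopt header
 * followed (not nested) by optional attributes, which is why parse_attr()
 * above skips NLA_ALIGN(sizeof(struct tc_netem_qopt)) bytes before calling
 * nla_parse() instead of using the usual nested-attribute helpers.
 */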
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and q->loss_model
	 * were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");