/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using a token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
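/*
 * Example configurations (illustrative only; "eth0" and the numbers are
 * placeholders, not taken from this file).  These are standard iproute2
 * "tc ... netem" invocations for the features implemented below:
 *
 *	# 100ms delay, +/-10ms jitter, 25% correlation between samples
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# independent random loss and duplication
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 *	# single-bit corruption in 0.1% of packets
 *	tc qdisc change dev eth0 root netem corrupt 0.1%
 *
 *	# reordering: roughly every 5th packet may be sent immediately
 *	# (25% chance); the rest are delayed by 10ms
 *	tc qdisc change dev eth0 root netem gap 5 delay 10ms reorder 25%
 */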
struct netem_sched_data {
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;
};
/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
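/*
 * Worked example (illustrative, not from the original source): with
 * rho = 0x80000000, i.e. a correlation of roughly 0.5 when scaled to
 * the full 32-bit range, the formula above reduces to approximately
 *
 *	answer ~= value/2 + last/2
 *
 * so each new sample is an exponentially weighted blend of fresh entropy
 * and the previous output.  rho == 0 gives pure net_random() output, and
 * rho close to 2^32 makes successive samples nearly identical.
 */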
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();
	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
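	/*
	 * Illustrative reading of the comparisons below (an editorial gloss,
	 * not part of the original comment): rnd is matched against
	 * cumulative transition thresholds, so in state 1 a value below a4
	 * yields an isolated loss (state 4), a value between a4 and a1
	 * starts a loss burst (state 3), and anything larger keeps
	 * transmitting within the gap period.
	 */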
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	}

	return false;
}
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
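/*
 * Illustrative numbers (not from the original source): in the simple
 * Gilbert case with p = 1% (good->bad) and r = 30% (bad->good), the
 * stationary probability of the bad state is p / (p + r), roughly 3.2%,
 * and the mean loss-burst length is 1 / r, roughly 3.3 packets.
 */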
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);
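		/*
		 * Illustrative note: q->loss is a probability scaled to the
		 * full u32 range by userspace, so e.g. "loss 1%" arrives here
		 * as roughly 1% of 2^32 (about 0x028F5C29) and is compared
		 * against the (possibly correlated) 32-bit random sample.
		 */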
	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
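/*
 * Illustrative reading of the arithmetic below: a table entry t is a
 * signed deviation expressed in NETEM_DIST_SCALE units of one sigma,
 * so the result is approximately
 *
 *	mu + (sigma * t) / NETEM_DIST_SCALE
 *
 * (splitting sigma into its modulus and quotient by NETEM_DIST_SCALE
 * just avoids overflow while preserving rounding).  Without a loaded
 * table, the fallback is a uniform value in [mu - sigma, mu + sigma).
 */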
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}
	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}
	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}
	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		sch->qstats.backlog += qdisc_pkt_len(skb);
		sch->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* if more time remaining? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
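/*
 * Example (illustrative): a table is normally generated and loaded by
 * iproute2, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * where "normal" refers to a pre-generated *.dist file shipped with tc;
 * the device name and numbers here are placeholders.
 */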
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	size_t s;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	dist_free(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}
static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;

			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_info("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;
	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 * of reordering
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}
/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
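/*
 * Illustrative example (not from the original source): with
 * "delay 100ms 90ms", a packet that arrives first but is stamped to
 * leave at t+190ms ends up behind a later arrival stamped t+20ms,
 * because the reverse walk below keeps the queue sorted by
 * time_to_send rather than by arrival order.
 */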
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}
static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
};
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_notice("netem: qdisc create tfifo qdisc failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_info("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");