/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914: computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816: ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */
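/* For illustration only, a typical userspace configuration might look
 * like (hypothetical interface and values):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptive bandwidth 10Mbit
 *
 * where "limit" is this hard byte limit, min/max map to qth_min/qth_max,
 * and "ecn"/"adaptive" set TC_RED_ECN/TC_RED_ADAPTATIVE in the flags.
 */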
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
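/* Enqueue: compute the EWMA queue average, then let red_action() decide
 * whether to pass the packet through, probabilistically mark/drop it, or
 * force-mark/drop it, before handing it to the child (bfifo) qdisc.
 */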
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
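/* Dequeue from the child qdisc; if the child turns out to be empty, note
 * the start of an idle period so the average queue estimate decays
 * correctly while the link is unused.
 */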
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
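/* Ask the child qdisc to drop a packet (if it supports ->drop) and
 * account it as "other"; if nothing could be dropped, the queue is empty
 * and an idle period begins.
 */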
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		qdisc_qstats_drop(sch);
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->vars))
		red_start_of_idle_period(&q->vars);

	return 0;
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->vars);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P]	= { .type = NLA_U32 },
};
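/* Parse the netlink configuration, (re)create the internal bfifo child
 * sized to the byte limit, and install the new RED parameters under the
 * qdisc tree lock.
 */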
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}
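/* Periodic timer (roughly every 500ms) that re-tunes max_P while the
 * qdisc is in adaptative mode, taking the root lock to serialize with
 * the datapath.
 */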
static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	spin_unlock(root_lock);
}
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}
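/* Report the current configuration back to userspace, scaling the
 * fixed-point thresholds back down by Wlog.
 */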
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
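/* RED exposes its child qdisc as a single pseudo-class with minor
 * number 1; the class ops below exist mainly so the child can be
 * inspected and replaced.
 */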
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
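/* Swap in a new child qdisc (falling back to noop_qdisc), returning the
 * old one to the caller after deducting its packets from the ancestors'
 * queue lengths.
 */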
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");