net/sched/sch_fq_codel.c
/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 * might be hashed on the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
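
/* Illustrative configuration from user space (example values only; the
 * tc(8) fq_codel keywords map onto the TCA_FQ_CODEL_* attributes parsed
 * in fq_codel_change() below):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *	target 5ms interval 100ms quantum 1514 ecn
 */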

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};
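
/* Map an skb onto one of the flows_cnt buckets. The hash is perturbed by a
 * random key chosen at init time, and distinct flows may collide on the
 * same bucket (stochastic fairness).
 */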
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
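
/* Return a flow index in [1, flows_cnt], or 0 if the packet must be dropped.
 * skb->priority or an attached tc filter can select the bucket directly;
 * otherwise fall back to fq_codel_hash().
 */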
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions : might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
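
/* Called when the qdisc is over its packet or memory limit: find the flow
 * with the largest backlog and drop up to max_packets packets (at most half
 * of that backlog) from its head. Returns the index of the culled flow.
 */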
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += skb->truesize;
		kfree_skb(skb);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
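
/* ->drop() hook: drop a single packet from the fattest flow and report how
 * many bytes of backlog were freed.
 */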
static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	fq_codel_drop(sch, 1U);
	return prev_backlog - sch->qstats.backlog;
}
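
/* ->enqueue(): classify the packet, timestamp it for CoDel, append it to its
 * flow FIFO and, if this pushes the qdisc over sch->limit or over
 * memory_limit, shed a batch of packets from the fattest flow.
 */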
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	q->memory_usage += skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packets limit to not add a too big cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= skb->truesize;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_drop(skb, sch);
}
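
/* ->dequeue(): deficit round robin over new_flows then old_flows. Each flow
 * gets quantum bytes of deficit per round, and its head packets go through
 * codel_dequeue() for delay-based dropping or ECN marking.
 */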
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
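
/* Purge every flow FIFO and reset per-flow CoDel state and counters. */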
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}

		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};
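
/* Apply TCA_FQ_CODEL_* attributes under the qdisc tree lock. target, interval
 * and ce_threshold arrive in usec and are converted to codel time units;
 * excess packets are dequeued and dropped if the new limits are exceeded.
 */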
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
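
/* Flow tables can be large (flows_cnt up to 65536), so fall back to vzalloc()
 * when kzalloc() cannot satisfy the allocation.
 */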
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}
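
/* Set the defaults (10240 packet limit, 1024 flows, 64 packet drop batches,
 * 32 MB memory limit), apply any netlink options, then allocate the flow and
 * backlog tables.
 */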
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
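
/* Report per-flow (per "class") statistics: deficit, CoDel state and the
 * current queue occupancy of bucket cl - 1.
 */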
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_qdisc_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");