/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we fall back to rxhash, a 32-bit wide hash.
 *  All packets belonging to a socket are considered a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (eg TCP) can set a rate in sk->sk_pacing_rate and enqueue a
 *  bunch of packets; this packet scheduler adds delay between packets to
 *  respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (FIFO).
 *   - Use a special FIFO for high priority packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: when a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it is expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */
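/* Illustrative usage (not part of this file; parameter values are arbitrary,
 * see tc-fq(8) for the authoritative option list):
 *
 *   tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *        buckets 1024 maxrate 1gbit
 *
 * Pacing relies on sk->sk_pacing_rate set by the transport (e.g. TCP),
 * so 'fq' is normally installed as the root qdisc of a physical device.
 */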
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>
/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	struct sk_buff	*tail;		/* last skb in the list */
	unsigned long	age;		/* jiffies when flow was emptied, for gc */
	union {
		struct rb_node	fq_node;	/* anchor in fq_root[] trees */
		struct sock	*sk;
	};
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow	*next;		/* next pointer in RR lists, or &detached */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};
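/* A flow is always in exactly one of three states:
 *  - detached: empty and idle, f->next points to the 'detached' sentinel;
 *  - active: linked on the 'new' or 'old' Round Robin list via f->next;
 *  - throttled: parked in the q->delayed rb-tree with f->next pointing to
 *    the 'throttled' sentinel, until time_next_packet is reached.
 */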
struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};
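/* Accounting note: 'flows' counts every allocated flow, 'inactive_flows'
 * the detached subset, and 'throttled_flows' the flows parked in 'delayed'.
 * Garbage collection is only attempted when the table is well populated
 * and more than half of the flows are inactive (see fq_classify()).
 */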
/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}
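/* q->delayed is kept sorted by time_next_packet, so fq_check_throttled()
 * can release expired flows by repeatedly popping rb_first(), and
 * time_next_delayed_flow always tracks the earliest release time used to
 * arm the qdisc watchdog.
 */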
static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}
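/* Flows are keyed by socket pointer; packets without a (full) socket are
 * spread over 'orphan_mask + 1' synthetic keys built from the skb hash,
 * with the low order bit set so they can never alias a real socket.
 */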
/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}
/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}
/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) {	/* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}
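/* Since skb_is_retransmit() currently always returns false, the retransmit
 * handling above is dormant and flow_queue_add() behaves as a plain FIFO
 * tail insertion.
 */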
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
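/* When an idle (detached) flow becomes active again after more than
 * flow_refill_delay, its credit is topped up to at least one quantum so
 * that a previously exhausted flow does not restart its burst in debt.
 */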
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate, plen;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (!q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		f->credit = 0;
		plen = qdisc_pkt_len(skb);
	} else {
		plen = max(qdisc_pkt_len(skb), q->quantum);
		if (f->credit > 0)
			goto out;
	}
	if (rate != ~0U) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}
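/* Pacing math above, with illustrative numbers (not from this file):
 * a flow paced at rate = 1.25 MByte/s (10 Mbit/s) sending plen = 1514 byte
 * packets gets len = 1514 * NSEC_PER_SEC / 1250000 ~= 1.2 ms between
 * transmissions, clamped to at most 1 second per packet.
 */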
static void fq_flow_purge(struct fq_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}
static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->flows		= 0;
	q->inactive_flows	= 0;
	q->throttled_flows	= 0;
}
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}
static void fq_free(void *addr)
{
	kvfree(addr);
}
static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}
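/* Resizing allocates the new bucket array before taking the qdisc tree
 * lock, then rehashes surviving flows into it under sch_tree_lock();
 * gc candidates are simply freed instead of being migrated (see
 * fq_rehash() above).
 */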
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}
static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0U;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;
	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = q->stat_internal_packets;
	st.tcp_retrans		  = q->stat_tcp_retrans;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
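/* These counters back the per-qdisc statistics reported by
 * 'tc -s qdisc show' (gc, highprio, throttled, flows_plimit, ...);
 * the field names follow struct tc_fq_qd_stats in
 * include/uapi/linux/pkt_sched.h.
 */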
static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}
static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");