/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be used mostly for locally generated traffic:
 * fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as fallback, with a 32 bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin 'queue' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue():
 *  - lookup one RB tree (out of 1024 or more) to find the flow.
 *    If the flow does not exist, create it and add it to the tree.
 *    Add the skb to the per flow list of skbs (FIFO).
 *  - Use a special FIFO for high prio packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: when a flow becomes empty, we do not immediately remove it from
 * the RB trees, for performance reasons (it's expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */
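
/*
 * Example setup (illustrative only, added commentary; exact option names
 * depend on the iproute2 version and its fq support):
 *
 *      tc qdisc replace dev eth0 root fq
 *      tc qdisc change  dev eth0 root fq maxrate 1gbit
 *
 * The first command attaches fq as the root qdisc; the second caps every
 * flow at roughly 1 Gbit/s via TCA_FQ_FLOW_MAX_RATE (see fq_change() below).
 */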
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow  *next;          /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};
struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};
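
/*
 * Informal sketch of how the pieces above fit together (derived from the
 * code below, added commentary, not part of the original comments):
 *
 *   fq_root[2^fq_trees_log]  hash buckets, each an RB tree of fq_flow,
 *                            keyed by socket pointer (or rxhash | 1).
 *   new_flows / old_flows    Round Robin lists linked through flow->next,
 *                            served by fq_dequeue() using flow->credit.
 *   delayed                  RB tree of throttled flows, sorted by
 *                            flow->time_next_packet for the watchdog.
 *   internal                 catch-all flow for high prio packets and for
 *                            allocation failures, always served first.
 */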
/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = container_of(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}
static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}
/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        if (unlikely(!sk)) {
                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
        }

        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}
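
/*
 * Note on the lookup above (added commentary, not from the original file):
 * with the default fq_trees_log of ilog2(1024) = 10, hash_32() spreads the
 * flow keys over 1024 RB tree buckets, so each tree stays shallow even with
 * tens of thousands of flows. Garbage collection is only attempted once the
 * flow count reaches twice the bucket count (2048 here) and more than half
 * of the flows are inactive.
 */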
/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
}
/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}
/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) {    /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        sch->qstats.backlog += qdisc_pkt_len(skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
                qdisc_unthrottled(sch);
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
                qdisc_unthrottled(sch);
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                rb_erase(p, &q->delayed);
                q->throttled_flows--;
                fq_flow_add_tail(&q->old_flows, f);
        }
}
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_to_ns(ktime_get());
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        if (unlikely(f->head && now < f->time_next_packet)) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);

        if (f->credit > 0 || !q->rate_enable)
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 125 ms.
                 * TODO: maybe segment the too big skb, as in commit
                 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
                 */
                if (unlikely(len > 125 * NSEC_PER_MSEC)) {
                        len = 125 * NSEC_PER_MSEC;
                        q->stat_pkts_too_long++;
                }

                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        qdisc_unthrottled(sch);
        return skb;
}
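
/*
 * Worked example for the pacing math in fq_dequeue() (added commentary,
 * not from the original file): once a flow's credit is exhausted, the delay
 * before its next packet is plen * NSEC_PER_SEC / rate, with
 * plen = max(qdisc_pkt_len(skb), q->quantum). With the default quantum of
 * two MTU-sized packets (plen = 3028 bytes) and a pacing rate of
 * 125000000 bytes/sec (about 1 Gbit/s):
 *
 *      len = 3028 * NSEC_PER_SEC / 125000000 = 24224 ns
 *
 * so roughly 24 us must elapse before this flow is served again. At very
 * low rates the result is clamped to 125 ms and counted in
 * stat_pkts_too_long.
 */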
static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct sk_buff *skb;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = container_of(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        while ((skb = fq_dequeue_head(sch, f)) != NULL)
                                kfree_skb(skb);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = container_of(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = container_of(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}
static int fq_resize(struct fq_sched_data *q, u32 log)
{
        struct rb_root *array;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        if (q->fq_root) {
                fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
                kfree(q->fq_root);
        }
        q->fq_root = array;
        q->fq_trees_log = log;

        return 0;
}
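
/*
 * Sizing note (added commentary, not from the original file): each bucket
 * is a struct rb_root, i.e. a single pointer, so with the default
 * fq_trees_log = 10 this kmalloc() asks for 1024 * 8 = 8192 bytes on a
 * 64 bit build. The maximum log accepted by fq_change() is
 * ilog2(256*1024) = 18, i.e. 256K buckets, about 2 MB of rb_root pointers.
 */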
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM])
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (!err)
                err = fq_resize(q, fq_log);

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                kfree_skb(skb);
                drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, drop_count);

        sch_tree_unlock(sch);
        return err;
}
static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        kfree(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}
static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0U;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt);
        else
                err = fq_resize(q, q->fq_trees_log);

        return err;
}
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        nla_nest_end(skb, opts);
        return skb->len;

nla_put_failure:
        return -1;
}
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_to_ns(ktime_get());
        struct tc_fq_qd_stats st = {
                .gc_flows               = q->stat_gc_flows,
                .highprio_packets       = q->stat_internal_packets,
                .tcp_retrans            = q->stat_tcp_retrans,
                .throttled              = q->stat_throttled,
                .flows_plimit           = q->stat_flows_plimit,
                .pkts_too_long          = q->stat_pkts_too_long,
                .allocation_errors      = q->stat_allocation_errors,
                .flows                  = q->flows,
                .inactive_flows         = q->inactive_flows,
                .throttled_flows        = q->throttled_flows,
                .time_next_delayed_flow = q->time_next_delayed_flow - now,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}
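
/*
 * Added commentary, not from the original file: these counters are exported
 * through gnet_stats_copy_app() as app-specific statistics, so with a
 * matching iproute2 they appear in the output of "tc -s qdisc show dev <dev>"
 * alongside the generic qdisc counters (the exact field names printed depend
 * on the tc version).
 */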
static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             = "fq",
        .priv_size      = sizeof(struct fq_sched_data),

        .enqueue        = fq_enqueue,
        .dequeue        = fq_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = fq_init,
        .reset          = fq_reset,
        .destroy        = fq_destroy,
        .change         = fq_change,
        .dump           = fq_dump,
        .dump_stats     = fq_dump_stats,
        .owner          = THIS_MODULE,
};
static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");