/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */

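/* Subtract a removed batch from the flow, tin, and fq counters; once the
 * flow's backlog reaches zero, drop it from the backlog tracking
 * (tin_list for a tin's default flow, flows_bitmap otherwise).
 */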
static void
__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
                    unsigned int bytes, unsigned int truesize)
{
        struct fq_tin *tin = flow->tin;
        int idx;

        tin->backlog_bytes -= bytes;
        tin->backlog_packets -= packets;
        flow->backlog -= bytes;
        fq->backlog -= packets;
        fq->memory_usage -= truesize;

        if (flow->backlog)
                return;

        if (flow == &tin->default_flow) {
                list_del_init(&tin->tin_list);
                return;
        }

        idx = flow - fq->flows;
        __clear_bit(idx, fq->flows_bitmap);
}

static void fq_adjust_removal(struct fq *fq,
                              struct fq_flow *flow,
                              struct sk_buff *skb)
{
        __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
}

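/* Dequeue one skb from the flow and update the accounting; return NULL
 * when the flow's queue is empty.
 */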
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                                       struct fq_flow *flow)
{
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

        skb = __skb_dequeue(&flow->queue);
        if (!skb)
                return NULL;

        fq_adjust_removal(fq, flow, skb);

        return skb;
}

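/* Drop up to half of the flow's queued packets, capped at 32 per call
 * (and at least one if the queue is non-empty); return the number freed.
 */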
static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
                        fq_skb_free_t free_func)
{
        unsigned int packets = 0, bytes = 0, truesize = 0;
        struct fq_tin *tin = flow->tin;
        struct sk_buff *skb;
        int pending;

        lockdep_assert_held(&fq->lock);

        pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
        do {
                skb = __skb_dequeue(&flow->queue);
                if (!skb)
                        break;

                packets++;
                bytes += skb->len;
                truesize += skb->truesize;
                free_func(fq, tin, flow, skb);
        } while (packets < pending);

        __fq_adjust_removal(fq, flow, packets, bytes, truesize);

        return packets;
}

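/* Deficit round-robin dequeue: service new_flows before old_flows, refill
 * a flow's deficit with one quantum when it runs out and rotate the flow
 * to old_flows, and charge each dequeued packet's length to the deficit.
 */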
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                                      struct fq_tin *tin,
                                      fq_tin_dequeue_t dequeue_func)
{
        struct fq_flow *flow;
        struct list_head *head;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

begin:
        head = &tin->new_flows;
        if (list_empty(head)) {
                head = &tin->old_flows;
                if (list_empty(head))
                        return NULL;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += fq->quantum;
                list_move_tail(&flow->flowchain,
                               &tin->old_flows);
                goto begin;
        }

        skb = dequeue_func(fq, tin, flow);
        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &tin->new_flows) &&
                    !list_empty(&tin->old_flows)) {
                        list_move_tail(&flow->flowchain, &tin->old_flows);
                } else {
                        list_del_init(&flow->flowchain);
                        flow->tin = NULL;
                }
                goto begin;
        }

        flow->deficit -= skb->len;
        tin->tx_bytes += skb->len;
        tin->tx_packets++;

        return skb;
}

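/* Map an skb to a flow index by scaling its flow hash into [0, flows_cnt). */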
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
        u32 hash = skb_get_hash(skb);

        return reciprocal_scale(hash, fq->flows_cnt);
}

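/* Resolve a hash collision: if the bucket is already owned by another tin,
 * fall back to this tin's default_flow and account the collision.
 */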
static struct fq_flow *fq_flow_classify(struct fq *fq,
                                        struct fq_tin *tin, u32 idx,
                                        struct sk_buff *skb)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        flow = &fq->flows[idx];
        if (flow->tin && flow->tin != tin) {
                flow = &tin->default_flow;
                tin->collisions++;
                fq->collisions++;
        }

        if (!flow->tin)
                tin->flows++;

        return flow;
}

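/* Pick the drop candidate: scan the backlogged hash-table flows and the
 * per-tin default flows, and return the one with the largest backlog.
 */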
static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
{
        struct fq_tin *tin;
        struct fq_flow *flow = NULL;
        u32 len = 0;
        int i;

        for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
                struct fq_flow *cur = &fq->flows[i];
                unsigned int cur_len;

                cur_len = cur->backlog;
                if (cur_len <= len)
                        continue;

                flow = cur;
                len = cur_len;
        }

        list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
                unsigned int cur_len = tin->default_flow.backlog;

                if (cur_len <= len)
                        continue;

                flow = &tin->default_flow;
                len = cur_len;
        }

        return flow;
}

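/* Enqueue an skb (or a list of skbs) on its flow, then enforce the packet
 * and memory limits by repeatedly dropping from the fattest flow.
 *
 * One possible caller pattern (sketch only; tin selection and
 * my_free_func are caller-specific and not defined by this header):
 *
 *        spin_lock_bh(&fq->lock);
 *        idx = fq_flow_idx(fq, skb);
 *        fq_tin_enqueue(fq, tin, idx, skb, my_free_func);
 *        spin_unlock_bh(&fq->lock);
 */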
static void fq_tin_enqueue(struct fq *fq,
                           struct fq_tin *tin, u32 idx,
                           struct sk_buff *skb,
                           fq_skb_free_t free_func)
{
        struct fq_flow *flow;
        struct sk_buff *next;
        bool oom;

        lockdep_assert_held(&fq->lock);

        flow = fq_flow_classify(fq, tin, idx, skb);

        if (!flow->backlog) {
                if (flow != &tin->default_flow)
                        __set_bit(idx, fq->flows_bitmap);
                else if (list_empty(&tin->tin_list))
                        list_add(&tin->tin_list, &fq->tin_backlog);
        }

        flow->tin = tin;
        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);
                flow->backlog += skb->len;
                tin->backlog_bytes += skb->len;
                tin->backlog_packets++;
                fq->memory_usage += skb->truesize;
                fq->backlog++;

                __skb_queue_tail(&flow->queue, skb);
        }

        if (list_empty(&flow->flowchain)) {
                flow->deficit = fq->quantum;
                list_add_tail(&flow->flowchain,
                              &tin->new_flows);
        }

        oom = (fq->memory_usage > fq->memory_limit);
        while (fq->backlog > fq->limit || oom) {
                flow = fq_find_fattest_flow(fq);
                if (!flow)
                        return;

                if (!fq_flow_drop(fq, flow, free_func))
                        return;

                flow->tin->overlimit++;
                fq->overlimit++;
                if (oom) {
                        fq->overmemory++;
                        oom = (fq->memory_usage > fq->memory_limit);
                }
        }
}

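/* Unlink and free every queued skb that filter_func matches. */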
static void fq_flow_filter(struct fq *fq,
                           struct fq_flow *flow,
                           fq_skb_filter_t filter_func,
                           void *filter_data,
                           fq_skb_free_t free_func)
{
        struct fq_tin *tin = flow->tin;
        struct sk_buff *skb, *tmp;

        lockdep_assert_held(&fq->lock);

        skb_queue_walk_safe(&flow->queue, skb, tmp) {
                if (!filter_func(fq, tin, flow, skb, filter_data))
                        continue;

                __skb_unlink(skb, &flow->queue);
                fq_adjust_removal(fq, flow, skb);
                free_func(fq, tin, flow, skb);
        }
}

static void fq_tin_filter(struct fq *fq,
                          struct fq_tin *tin,
                          fq_skb_filter_t filter_func,
                          void *filter_data,
                          fq_skb_free_t free_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        list_for_each_entry(flow, &tin->new_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
        list_for_each_entry(flow, &tin->old_flows, flowchain)
                fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

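/* Free everything queued on a flow and detach it from its tin. */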
static void fq_flow_reset(struct fq *fq,
                          struct fq_flow *flow,
                          fq_skb_free_t free_func)
{
        struct fq_tin *tin = flow->tin;
        struct sk_buff *skb;

        while ((skb = fq_flow_dequeue(fq, flow)))
                free_func(fq, tin, flow, skb);

        if (!list_empty(&flow->flowchain)) {
                list_del_init(&flow->flowchain);
                if (list_empty(&tin->new_flows) &&
                    list_empty(&tin->old_flows))
                        list_del_init(&tin->tin_list);
        }

        flow->tin = NULL;

        WARN_ON_ONCE(flow->backlog);
}

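/* Reset every flow still linked on the tin until both flow lists drain. */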
static void fq_tin_reset(struct fq *fq,
                         struct fq_tin *tin,
                         fq_skb_free_t free_func)
{
        struct list_head *head;
        struct fq_flow *flow;

        for (;;) {
                head = &tin->new_flows;
                if (list_empty(head)) {
                        head = &tin->old_flows;
                        if (list_empty(head))
                                break;
                }

                flow = list_first_entry(head, struct fq_flow, flowchain);
                fq_flow_reset(fq, flow, free_func);
        }

        WARN_ON_ONCE(!list_empty(&tin->tin_list));
        WARN_ON_ONCE(tin->backlog_bytes);
        WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
        INIT_LIST_HEAD(&flow->flowchain);
        __skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
        INIT_LIST_HEAD(&tin->new_flows);
        INIT_LIST_HEAD(&tin->old_flows);
        INIT_LIST_HEAD(&tin->tin_list);
        fq_flow_init(&tin->default_flow);
}

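/* Initialize the fq instance with its default quantum, packet limit, and
 * memory limit, and allocate the flow array plus its backlog bitmap.
 */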
static int fq_init(struct fq *fq, int flows_cnt)
{
        int i;

        memset(fq, 0, sizeof(fq[0]));
        spin_lock_init(&fq->lock);
        INIT_LIST_HEAD(&fq->tin_backlog);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */

        fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;

        fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
        if (!fq->flows_bitmap) {
                kvfree(fq->flows);
                fq->flows = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_init(&fq->flows[i]);

        return 0;
}

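/* Free all queued packets and release the flow array and bitmap. */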
static void fq_reset(struct fq *fq,
                     fq_skb_free_t free_func)
{
        int i;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);

        kvfree(fq->flows);
        fq->flows = NULL;

        bitmap_free(fq->flows_bitmap);
        fq->flows_bitmap = NULL;
}

#endif