/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */
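
/*
 * The including file is expected to supply the struct fq, struct fq_tin
 * and struct fq_flow definitions plus the fq_tin_dequeue_t,
 * fq_skb_free_t, fq_skb_filter_t and fq_flow_get_default_t callback
 * types from <net/fq.h>; the enqueue, dequeue and filter helpers below
 * assert that fq->lock is held.
 */

/* Undo a packet's byte/packet accounting on its flow, tin and fq. */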
static void fq_adjust_removal(struct fq *fq,
			      struct fq_flow *flow,
			      struct sk_buff *skb)
{
	struct fq_tin *tin = flow->tin;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;
	fq->memory_usage -= skb->truesize;
}
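
/*
 * Re-sort a flow whose backlog just shrank; fq->backlogs is kept
 * sorted by descending per-flow backlog, and an emptied flow is
 * removed from the list entirely.
 */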
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
	struct fq_flow *i;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}
}
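
/* Pop one skb from the flow and update all backlog accounting. */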
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	fq_adjust_removal(fq, flow, skb);
	fq_rejigger_backlog(fq, flow);

	return skb;
}
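
/*
 * Deficit round-robin dequeue across the tin's flows, as in fq_codel:
 * new flows are served first, and a flow whose deficit is exhausted is
 * granted another quantum and rotated onto the old_flows list.
 */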
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}
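
/*
 * Hash the skb into the flow table; on a cross-tin collision the
 * caller-supplied get_default_func picks a fallback flow.
 */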
static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	u32 hash;
	u32 idx;

	lockdep_assert_held(&fq->lock);

	hash = skb_get_hash_perturb(skb, fq->perturbation);
	idx = reciprocal_scale(hash, fq->flows_cnt);
	flow = &fq->flows[idx];

	if (flow->tin && flow->tin != tin) {
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}
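
/*
 * Insert or promote the flow within fq->backlogs so the list stays
 * sorted with the largest backlog at the head.
 */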
static void fq_recalc_backlog(struct fq *fq,
			      struct fq_tin *tin,
			      struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}
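
/*
 * Enqueue an skb on its flow. While the total backlog exceeds
 * fq->limit, or memory usage exceeds fq->memory_limit, packets are
 * dropped from the head of the fattest flow (the front of
 * fq->backlogs) via free_func.
 */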
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	bool oom;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->memory_usage += skb->truesize;
	fq->backlog++;

	fq_recalc_backlog(fq, tin, flow);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);
	oom = (fq->memory_usage > fq->memory_limit);
	while (fq->backlog > fq->limit || oom) {
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
		if (oom) {
			fq->overmemory++;
			oom = (fq->memory_usage > fq->memory_limit);
		}
	}
}
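
/*
 * Free every queued skb that filter_func matches, then re-sort the
 * flow in the backlog list to account for the removals.
 */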
static void fq_flow_filter(struct fq *fq,
			   struct fq_flow *flow,
			   fq_skb_filter_t filter_func,
			   void *filter_data,
			   fq_skb_free_t free_func)
{
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb, *tmp;

	lockdep_assert_held(&fq->lock);

	skb_queue_walk_safe(&flow->queue, skb, tmp) {
		if (!filter_func(fq, tin, flow, skb, filter_data))
			continue;

		__skb_unlink(skb, &flow->queue);
		fq_adjust_removal(fq, flow, skb);
		free_func(fq, tin, flow, skb);
	}

	fq_rejigger_backlog(fq, flow);
}
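
/* Run fq_flow_filter() over every flow scheduled on the tin. */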
static void fq_tin_filter(struct fq *fq,
			  struct fq_tin *tin,
			  fq_skb_filter_t filter_func,
			  void *filter_data,
			  fq_skb_free_t free_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	list_for_each_entry(flow, &tin->new_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
	list_for_each_entry(flow, &tin->old_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}
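
/* Flush a flow and detach it from its tin and the backlog list. */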
static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}
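
/* Reset every flow currently scheduled on the tin. */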
static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}
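
/*
 * One-time setup: zero the structure, seed the hash perturbation,
 * choose default quantum/limit values and allocate the flow table.
 */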
static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;
	fq->memory_limit = 16 << 20; /* 16 MBytes */

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}
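
/* Tear-down counterpart of fq_init(): flush and free all flows. */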
static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

#endif