net/sched/sch_cake.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 /* COMMON Applications Kept Enhanced (CAKE) discipline
5 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
6 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
7 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
8 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
9 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
10 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
12 * The CAKE Principles:
13 * (or, how to have your cake and eat it too)
15 * This is a combination of several shaping, AQM and FQ techniques into one
16 * easy-to-use package:
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
19 * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
20 * eliminating the need for any sort of burst parameter (eg. token bucket
21 * depth). Burst support is limited to that necessary to overcome scheduling
22 * latency.
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
25 * up to a specified fraction of bandwidth. Above that bandwidth threshold,
26 * the priority is reduced to avoid starving other tins.
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
29 * flows from each other. This prevents a burst on one flow from increasing
30 * the delay to another. Flows are distributed to queues using a
31 * set-associative hash function.
33 * - Each queue is actively managed by Cobalt, which is a combination of the
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
35 * congestion early via ECN (if available) and/or packet drops, to keep
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
37 * setting, as is necessary at low bandwidths.
39 * The configuration parameters are kept deliberately simple for ease of use.
40 * Everything has sane defaults. Complete generality of configuration is *not*
41 * a goal.
43 * The priority queue operates according to a weighted DRR scheme, combined with
44 * a bandwidth tracker which reuses the shaper logic to detect which side of the
45 * bandwidth sharing threshold the tin is operating. This determines whether a
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
47 * that tin in the current pass.
49 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
50 * granted us permission to leverage.
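/* For illustration only (a hypothetical but typical userspace configuration):
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv3 nat
 *
 * enables the shaper at 20 Mbit/s together with the three-tin Diffserv model
 * and NAT-aware host isolation implemented below.
 */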
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/jiffies.h>
57 #include <linux/string.h>
58 #include <linux/in.h>
59 #include <linux/errno.h>
60 #include <linux/init.h>
61 #include <linux/skbuff.h>
62 #include <linux/jhash.h>
63 #include <linux/slab.h>
64 #include <linux/vmalloc.h>
65 #include <linux/reciprocal_div.h>
66 #include <net/netlink.h>
67 #include <linux/if_vlan.h>
68 #include <net/pkt_sched.h>
69 #include <net/pkt_cls.h>
70 #include <net/tcp.h>
71 #include <net/flow_dissector.h>
73 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
74 #include <net/netfilter/nf_conntrack_core.h>
75 #endif
77 #define CAKE_SET_WAYS (8)
78 #define CAKE_MAX_TINS (8)
79 #define CAKE_QUEUES (1024)
80 #define CAKE_FLOW_MASK 63
81 #define CAKE_FLOW_NAT_FLAG 64
83 /* struct cobalt_params - contains codel and blue parameters
84 * @interval: codel initial drop rate
85 * @target: maximum persistent sojourn time & blue update rate
86 * @mtu_time: serialisation delay of maximum-size packet
87 * @p_inc: increment of blue drop probability (0.32 fxp)
88 * @p_dec: decrement of blue drop probability (0.32 fxp)
90 struct cobalt_params {
91 u64 interval;
92 u64 target;
93 u64 mtu_time;
94 u32 p_inc;
95 u32 p_dec;
98 /* struct cobalt_vars - contains codel and blue variables
99 * @count: codel dropping frequency
100 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
101 * @drop_next: time to drop next packet, or when we dropped last
102 * @blue_timer: Blue time to next drop
103 * @p_drop: BLUE drop probability (0.32 fxp)
104 * @dropping: set if in dropping state
105 * @ecn_marked: set if marked
107 struct cobalt_vars {
108 u32 count;
109 u32 rec_inv_sqrt;
110 ktime_t drop_next;
111 ktime_t blue_timer;
112 u32 p_drop;
113 bool dropping;
114 bool ecn_marked;
117 enum {
118 CAKE_SET_NONE = 0,
119 CAKE_SET_SPARSE,
120 CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
121 CAKE_SET_BULK,
122 CAKE_SET_DECAYING
125 struct cake_flow {
126 /* this stuff is all needed per-flow at dequeue time */
127 struct sk_buff *head;
128 struct sk_buff *tail;
129 struct list_head flowchain;
130 s32 deficit;
131 u32 dropped;
132 struct cobalt_vars cvars;
133 u16 srchost; /* index into cake_host table */
134 u16 dsthost;
135 u8 set;
136 }; /* please try to keep this structure <= 64 bytes */
138 struct cake_host {
139 u32 srchost_tag;
140 u32 dsthost_tag;
141 u16 srchost_bulk_flow_count;
142 u16 dsthost_bulk_flow_count;
145 struct cake_heap_entry {
146 u16 t:3, b:10;
149 struct cake_tin_data {
150 struct cake_flow flows[CAKE_QUEUES];
151 u32 backlogs[CAKE_QUEUES];
152 u32 tags[CAKE_QUEUES]; /* for set association */
153 u16 overflow_idx[CAKE_QUEUES];
154 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
155 u16 flow_quantum;
157 struct cobalt_params cparams;
158 u32 drop_overlimit;
159 u16 bulk_flow_count;
160 u16 sparse_flow_count;
161 u16 decaying_flow_count;
162 u16 unresponsive_flow_count;
164 u32 max_skblen;
166 struct list_head new_flows;
167 struct list_head old_flows;
168 struct list_head decaying_flows;
170 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
171 ktime_t time_next_packet;
172 u64 tin_rate_ns;
173 u64 tin_rate_bps;
174 u16 tin_rate_shft;
176 u16 tin_quantum_prio;
177 u16 tin_quantum_band;
178 s32 tin_deficit;
179 u32 tin_backlog;
180 u32 tin_dropped;
181 u32 tin_ecn_mark;
183 u32 packets;
184 u64 bytes;
186 u32 ack_drops;
188 /* moving averages */
189 u64 avge_delay;
190 u64 peak_delay;
191 u64 base_delay;
193 /* hash function stats */
194 u32 way_directs;
195 u32 way_hits;
196 u32 way_misses;
197 u32 way_collisions;
198 }; /* number of tins is small, so size of this struct doesn't matter much */
200 struct cake_sched_data {
201 struct tcf_proto __rcu *filter_list; /* optional external classifier */
202 struct tcf_block *block;
203 struct cake_tin_data *tins;
205 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
206 u16 overflow_timeout;
208 u16 tin_cnt;
209 u8 tin_mode;
210 u8 flow_mode;
211 u8 ack_filter;
212 u8 atm_mode;
214 u32 fwmark_mask;
215 u16 fwmark_shft;
217 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
218 u16 rate_shft;
219 ktime_t time_next_packet;
220 ktime_t failsafe_next_packet;
221 u64 rate_ns;
222 u64 rate_bps;
223 u16 rate_flags;
224 s16 rate_overhead;
225 u16 rate_mpu;
226 u64 interval;
227 u64 target;
229 /* resource tracking */
230 u32 buffer_used;
231 u32 buffer_max_used;
232 u32 buffer_limit;
233 u32 buffer_config_limit;
235 /* indices for dequeue */
236 u16 cur_tin;
237 u16 cur_flow;
239 struct qdisc_watchdog watchdog;
240 const u8 *tin_index;
241 const u8 *tin_order;
243 /* bandwidth capacity estimate */
244 ktime_t last_packet_time;
245 ktime_t avg_window_begin;
246 u64 avg_packet_interval;
247 u64 avg_window_bytes;
248 u64 avg_peak_bandwidth;
249 ktime_t last_reconfig_time;
251 /* packet length stats */
252 u32 avg_netoff;
253 u16 max_netlen;
254 u16 max_adjlen;
255 u16 min_netlen;
256 u16 min_adjlen;
259 enum {
260 CAKE_FLAG_OVERHEAD = BIT(0),
261 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
262 CAKE_FLAG_INGRESS = BIT(2),
263 CAKE_FLAG_WASH = BIT(3),
264 CAKE_FLAG_SPLIT_GSO = BIT(4)
267 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
268 * obtain the best features of each. Codel is excellent on flows which
269 * respond to congestion signals in a TCP-like way. BLUE is more effective on
270 * unresponsive flows.
273 struct cobalt_skb_cb {
274 ktime_t enqueue_time;
275 u32 adjusted_len;
278 static u64 us_to_ns(u64 us)
280 return us * NSEC_PER_USEC;
283 static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
285 qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
286 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
289 static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
291 return get_cobalt_cb(skb)->enqueue_time;
294 static void cobalt_set_enqueue_time(struct sk_buff *skb,
295 ktime_t now)
297 get_cobalt_cb(skb)->enqueue_time = now;
300 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
302 /* Diffserv lookup tables */
304 static const u8 precedence[] = {
305 0, 0, 0, 0, 0, 0, 0, 0,
306 1, 1, 1, 1, 1, 1, 1, 1,
307 2, 2, 2, 2, 2, 2, 2, 2,
308 3, 3, 3, 3, 3, 3, 3, 3,
309 4, 4, 4, 4, 4, 4, 4, 4,
310 5, 5, 5, 5, 5, 5, 5, 5,
311 6, 6, 6, 6, 6, 6, 6, 6,
312 7, 7, 7, 7, 7, 7, 7, 7,
315 static const u8 diffserv8[] = {
316 2, 5, 1, 2, 4, 2, 2, 2,
317 0, 2, 1, 2, 1, 2, 1, 2,
318 5, 2, 4, 2, 4, 2, 4, 2,
319 3, 2, 3, 2, 3, 2, 3, 2,
320 6, 2, 3, 2, 3, 2, 3, 2,
321 6, 2, 2, 2, 6, 2, 6, 2,
322 7, 2, 2, 2, 2, 2, 2, 2,
323 7, 2, 2, 2, 2, 2, 2, 2,
326 static const u8 diffserv4[] = {
327 0, 2, 0, 0, 2, 0, 0, 0,
328 1, 0, 0, 0, 0, 0, 0, 0,
329 2, 0, 2, 0, 2, 0, 2, 0,
330 2, 0, 2, 0, 2, 0, 2, 0,
331 3, 0, 2, 0, 2, 0, 2, 0,
332 3, 0, 0, 0, 3, 0, 3, 0,
333 3, 0, 0, 0, 0, 0, 0, 0,
334 3, 0, 0, 0, 0, 0, 0, 0,
337 static const u8 diffserv3[] = {
338 0, 0, 0, 0, 2, 0, 0, 0,
339 1, 0, 0, 0, 0, 0, 0, 0,
340 0, 0, 0, 0, 0, 0, 0, 0,
341 0, 0, 0, 0, 0, 0, 0, 0,
342 0, 0, 0, 0, 0, 0, 0, 0,
343 0, 0, 0, 0, 2, 0, 2, 0,
344 2, 0, 0, 0, 0, 0, 0, 0,
345 2, 0, 0, 0, 0, 0, 0, 0,
348 static const u8 besteffort[] = {
349 0, 0, 0, 0, 0, 0, 0, 0,
350 0, 0, 0, 0, 0, 0, 0, 0,
351 0, 0, 0, 0, 0, 0, 0, 0,
352 0, 0, 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0, 0, 0,
354 0, 0, 0, 0, 0, 0, 0, 0,
355 0, 0, 0, 0, 0, 0, 0, 0,
356 0, 0, 0, 0, 0, 0, 0, 0,
359 /* tin priority order for stats dumping */
361 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
362 static const u8 bulk_order[] = {1, 0, 2, 3};
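/* For illustration: each table above is indexed by DSCP (0..63) and yields a
 * tin number.  Reading diffserv3, for example, CS1 (DSCP 8) selects tin 1
 * (bulk), codepoints such as EF (46), VA (44), CS6 (48) and CS7 (56) select
 * tin 2, and the rest fall through to tin 0 (best effort); bulk_order[] then
 * lists the bulk tin first when dumping statistics.
 */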
364 #define REC_INV_SQRT_CACHE (16)
365 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
367 /* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
368 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
370 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
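/* Worked example of the step above, in ordinary arithmetic: with count == 4
 * and invsqrt == 0.5 (0x80000000 in Q0.32), invsqrt^2 == 0.25, so
 * 3 - count * invsqrt^2 == 2.0 and (invsqrt / 2) * 2.0 == 0.5, i.e. exactly
 * 1/sqrt(4).  The shifts in cobalt_newton_step() below implement the same
 * computation in fixed point.
 */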
373 static void cobalt_newton_step(struct cobalt_vars *vars)
375 u32 invsqrt, invsqrt2;
376 u64 val;
378 invsqrt = vars->rec_inv_sqrt;
379 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
380 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
382 val >>= 2; /* avoid overflow in following multiply */
383 val = (val * invsqrt) >> (32 - 2 + 1);
385 vars->rec_inv_sqrt = val;
388 static void cobalt_invsqrt(struct cobalt_vars *vars)
390 if (vars->count < REC_INV_SQRT_CACHE)
391 vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
392 else
393 cobalt_newton_step(vars);
396 /* There is a big difference in timing between the accurate values placed in
397 * the cache and the approximations given by a single Newton step for small
398 * count values, particularly when stepping from count 1 to 2 or vice versa.
399 * Above 16, a single Newton step gives sufficient accuracy in either
400 * direction, given the precision stored.
402 * The magnitude of the error when stepping up to count 2 is such as to give
403 * the value that *should* have been produced at count 4.
406 static void cobalt_cache_init(void)
408 struct cobalt_vars v;
410 memset(&v, 0, sizeof(v));
411 v.rec_inv_sqrt = ~0U;
412 cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
414 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
415 cobalt_newton_step(&v);
416 cobalt_newton_step(&v);
417 cobalt_newton_step(&v);
418 cobalt_newton_step(&v);
420 cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
424 static void cobalt_vars_init(struct cobalt_vars *vars)
426 memset(vars, 0, sizeof(*vars));
428 if (!cobalt_rec_inv_sqrt_cache[0]) {
429 cobalt_cache_init();
430 cobalt_rec_inv_sqrt_cache[0] = ~0;
434 /* CoDel control_law is t + interval/sqrt(count)
435 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
436 * both sqrt() and divide operation.
438 static ktime_t cobalt_control(ktime_t t,
439 u64 interval,
440 u32 rec_inv_sqrt)
442 return ktime_add_ns(t, reciprocal_scale(interval,
443 rec_inv_sqrt));
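/* For illustration: reciprocal_scale(interval, rec_inv_sqrt) is roughly
 * interval * rec_inv_sqrt >> 32, and rec_inv_sqrt approximates
 * 2^32 / sqrt(count); so with interval == 100ms and count == 4
 * (rec_inv_sqrt ~= 0x80000000), the next signalling time lands ~50ms after t.
 */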
446 /* Call this when a packet had to be dropped due to queue overflow. Returns
447 * true if the BLUE state was quiescent before but active after this call.
449 static bool cobalt_queue_full(struct cobalt_vars *vars,
450 struct cobalt_params *p,
451 ktime_t now)
453 bool up = false;
455 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
456 up = !vars->p_drop;
457 vars->p_drop += p->p_inc;
458 if (vars->p_drop < p->p_inc)
459 vars->p_drop = ~0;
460 vars->blue_timer = now;
462 vars->dropping = true;
463 vars->drop_next = now;
464 if (!vars->count)
465 vars->count = 1;
467 return up;
470 /* Call this when the queue was serviced but turned out to be empty. Returns
471 * true if the BLUE state was active before but quiescent after this call.
473 static bool cobalt_queue_empty(struct cobalt_vars *vars,
474 struct cobalt_params *p,
475 ktime_t now)
477 bool down = false;
479 if (vars->p_drop &&
480 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
481 if (vars->p_drop < p->p_dec)
482 vars->p_drop = 0;
483 else
484 vars->p_drop -= p->p_dec;
485 vars->blue_timer = now;
486 down = !vars->p_drop;
488 vars->dropping = false;
490 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
491 vars->count--;
492 cobalt_invsqrt(vars);
493 vars->drop_next = cobalt_control(vars->drop_next,
494 p->interval,
495 vars->rec_inv_sqrt);
498 return down;
501 /* Call this with a freshly dequeued packet for possible congestion marking.
502 * Returns true as an instruction to drop the packet, false for delivery.
504 static bool cobalt_should_drop(struct cobalt_vars *vars,
505 struct cobalt_params *p,
506 ktime_t now,
507 struct sk_buff *skb,
508 u32 bulk_flows)
510 bool next_due, over_target, drop = false;
511 ktime_t schedule;
512 u64 sojourn;
514 /* The 'schedule' variable records, in its sign, whether 'now' is before or
515 * after 'drop_next'. This allows 'drop_next' to be updated before the next
516 * scheduling decision is actually branched, without destroying that
517 * information. Similarly, the first 'schedule' value calculated is preserved
518 * in the boolean 'next_due'.
520 * As for 'drop_next', we take advantage of the fact that 'interval' is both
521 * the delay between first exceeding 'target' and the first signalling event,
522 * *and* the scaling factor for the signalling frequency. It's therefore very
523 * natural to use a single mechanism for both purposes, and eliminates a
524 * significant amount of reference Codel's spaghetti code. To help with this,
525 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
526 * as possible to 1.0 in fixed-point.
529 sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
530 schedule = ktime_sub(now, vars->drop_next);
531 over_target = sojourn > p->target &&
532 sojourn > p->mtu_time * bulk_flows * 2 &&
533 sojourn > p->mtu_time * 4;
534 next_due = vars->count && ktime_to_ns(schedule) >= 0;
536 vars->ecn_marked = false;
538 if (over_target) {
539 if (!vars->dropping) {
540 vars->dropping = true;
541 vars->drop_next = cobalt_control(now,
542 p->interval,
543 vars->rec_inv_sqrt);
545 if (!vars->count)
546 vars->count = 1;
547 } else if (vars->dropping) {
548 vars->dropping = false;
551 if (next_due && vars->dropping) {
552 /* Use ECN mark if possible, otherwise drop */
553 drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
555 vars->count++;
556 if (!vars->count)
557 vars->count--;
558 cobalt_invsqrt(vars);
559 vars->drop_next = cobalt_control(vars->drop_next,
560 p->interval,
561 vars->rec_inv_sqrt);
562 schedule = ktime_sub(now, vars->drop_next);
563 } else {
564 while (next_due) {
565 vars->count--;
566 cobalt_invsqrt(vars);
567 vars->drop_next = cobalt_control(vars->drop_next,
568 p->interval,
569 vars->rec_inv_sqrt);
570 schedule = ktime_sub(now, vars->drop_next);
571 next_due = vars->count && ktime_to_ns(schedule) >= 0;
575 /* Simple BLUE implementation. Lack of ECN is deliberate. */
576 if (vars->p_drop)
577 drop |= (prandom_u32() < vars->p_drop);
579 /* Overload the drop_next field as an activity timeout */
580 if (!vars->count)
581 vars->drop_next = ktime_add_ns(now, p->interval);
582 else if (ktime_to_ns(schedule) > 0 && !drop)
583 vars->drop_next = now;
585 return drop;
588 static void cake_update_flowkeys(struct flow_keys *keys,
589 const struct sk_buff *skb)
591 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
592 struct nf_conntrack_tuple tuple = {};
593 bool rev = !skb->_nfct;
595 if (skb_protocol(skb, true) != htons(ETH_P_IP))
596 return;
598 if (!nf_ct_get_tuple_skb(&tuple, skb))
599 return;
601 keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
602 keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
604 if (keys->ports.ports) {
605 keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
606 keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
608 #endif
611 /* Cake's flow-mode bits overlap, so the dual-src and dual-dst checks below
612 * intentionally also match triple-isolate mode.
615 static bool cake_dsrc(int flow_mode)
617 return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
620 static bool cake_ddst(int flow_mode)
622 return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
625 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
626 int flow_mode, u16 flow_override, u16 host_override)
628 u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
629 u16 reduced_hash, srchost_idx, dsthost_idx;
630 struct flow_keys keys, host_keys;
632 if (unlikely(flow_mode == CAKE_FLOW_NONE))
633 return 0;
635 /* If both overrides are set we can skip packet dissection entirely */
636 if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
637 (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
638 goto skip_hash;
640 skb_flow_dissect_flow_keys(skb, &keys,
641 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
643 if (flow_mode & CAKE_FLOW_NAT_FLAG)
644 cake_update_flowkeys(&keys, skb);
646 /* flow_hash_from_keys() sorts the addresses by value, so we have
647 * to preserve their order in a separate data structure to treat
648 * src and dst host addresses as independently selectable.
650 host_keys = keys;
651 host_keys.ports.ports = 0;
652 host_keys.basic.ip_proto = 0;
653 host_keys.keyid.keyid = 0;
654 host_keys.tags.flow_label = 0;
656 switch (host_keys.control.addr_type) {
657 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
658 host_keys.addrs.v4addrs.src = 0;
659 dsthost_hash = flow_hash_from_keys(&host_keys);
660 host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
661 host_keys.addrs.v4addrs.dst = 0;
662 srchost_hash = flow_hash_from_keys(&host_keys);
663 break;
665 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
666 memset(&host_keys.addrs.v6addrs.src, 0,
667 sizeof(host_keys.addrs.v6addrs.src));
668 dsthost_hash = flow_hash_from_keys(&host_keys);
669 host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
670 memset(&host_keys.addrs.v6addrs.dst, 0,
671 sizeof(host_keys.addrs.v6addrs.dst));
672 srchost_hash = flow_hash_from_keys(&host_keys);
673 break;
675 default:
676 dsthost_hash = 0;
677 srchost_hash = 0;
680 /* This *must* be after the above switch, since as a
681 * side-effect it sorts the src and dst addresses.
683 if (flow_mode & CAKE_FLOW_FLOWS)
684 flow_hash = flow_hash_from_keys(&keys);
686 skip_hash:
687 if (flow_override)
688 flow_hash = flow_override - 1;
689 if (host_override) {
690 dsthost_hash = host_override - 1;
691 srchost_hash = host_override - 1;
694 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
695 if (flow_mode & CAKE_FLOW_SRC_IP)
696 flow_hash ^= srchost_hash;
698 if (flow_mode & CAKE_FLOW_DST_IP)
699 flow_hash ^= dsthost_hash;
702 reduced_hash = flow_hash % CAKE_QUEUES;
704 /* set-associative hashing */
705 /* fast path if no hash collision (direct lookup succeeds) */
706 if (likely(q->tags[reduced_hash] == flow_hash &&
707 q->flows[reduced_hash].set)) {
708 q->way_directs++;
709 } else {
710 u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
711 u32 outer_hash = reduced_hash - inner_hash;
712 bool allocate_src = false;
713 bool allocate_dst = false;
714 u32 i, k;
716 /* check if any active queue in the set is reserved for
717 * this flow.
719 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
720 i++, k = (k + 1) % CAKE_SET_WAYS) {
721 if (q->tags[outer_hash + k] == flow_hash) {
722 if (i)
723 q->way_hits++;
725 if (!q->flows[outer_hash + k].set) {
726 /* need to increment host refcnts */
727 allocate_src = cake_dsrc(flow_mode);
728 allocate_dst = cake_ddst(flow_mode);
731 goto found;
735 /* no queue is reserved for this flow, look for an
736 * empty one.
738 for (i = 0; i < CAKE_SET_WAYS;
739 i++, k = (k + 1) % CAKE_SET_WAYS) {
740 if (!q->flows[outer_hash + k].set) {
741 q->way_misses++;
742 allocate_src = cake_dsrc(flow_mode);
743 allocate_dst = cake_ddst(flow_mode);
744 goto found;
748 /* With no empty queues, default to the original
749 * queue, accept the collision, update the host tags.
751 q->way_collisions++;
752 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
753 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
754 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
756 allocate_src = cake_dsrc(flow_mode);
757 allocate_dst = cake_ddst(flow_mode);
758 found:
759 /* reserve queue for future packets in same flow */
760 reduced_hash = outer_hash + k;
761 q->tags[reduced_hash] = flow_hash;
763 if (allocate_src) {
764 srchost_idx = srchost_hash % CAKE_QUEUES;
765 inner_hash = srchost_idx % CAKE_SET_WAYS;
766 outer_hash = srchost_idx - inner_hash;
767 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
768 i++, k = (k + 1) % CAKE_SET_WAYS) {
769 if (q->hosts[outer_hash + k].srchost_tag ==
770 srchost_hash)
771 goto found_src;
773 for (i = 0; i < CAKE_SET_WAYS;
774 i++, k = (k + 1) % CAKE_SET_WAYS) {
775 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
776 break;
778 q->hosts[outer_hash + k].srchost_tag = srchost_hash;
779 found_src:
780 srchost_idx = outer_hash + k;
781 if (q->flows[reduced_hash].set == CAKE_SET_BULK)
782 q->hosts[srchost_idx].srchost_bulk_flow_count++;
783 q->flows[reduced_hash].srchost = srchost_idx;
786 if (allocate_dst) {
787 dsthost_idx = dsthost_hash % CAKE_QUEUES;
788 inner_hash = dsthost_idx % CAKE_SET_WAYS;
789 outer_hash = dsthost_idx - inner_hash;
790 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
791 i++, k = (k + 1) % CAKE_SET_WAYS) {
792 if (q->hosts[outer_hash + k].dsthost_tag ==
793 dsthost_hash)
794 goto found_dst;
796 for (i = 0; i < CAKE_SET_WAYS;
797 i++, k = (k + 1) % CAKE_SET_WAYS) {
798 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
799 break;
801 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
802 found_dst:
803 dsthost_idx = outer_hash + k;
804 if (q->flows[reduced_hash].set == CAKE_SET_BULK)
805 q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
806 q->flows[reduced_hash].dsthost = dsthost_idx;
810 return reduced_hash;
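/* For illustration: with CAKE_QUEUES == 1024 and CAKE_SET_WAYS == 8, the 1024
 * queues form 128 sets of 8 ways.  A reduced_hash of 517 gives inner_hash 5
 * and outer_hash 512, so the lookup above probes queues 512..519 before
 * accepting a collision in the original slot (reduced_hash itself).
 */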
813 /* helper functions : might be changed when/if skb use a standard list_head */
814 /* remove one skb from head of slot queue */
816 static struct sk_buff *dequeue_head(struct cake_flow *flow)
818 struct sk_buff *skb = flow->head;
820 if (skb) {
821 flow->head = skb->next;
822 skb_mark_not_on_list(skb);
825 return skb;
828 /* add skb to flow queue (tail add) */
830 static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
832 if (!flow->head)
833 flow->head = skb;
834 else
835 flow->tail->next = skb;
836 flow->tail = skb;
837 skb->next = NULL;
840 static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
841 struct ipv6hdr *buf)
843 unsigned int offset = skb_network_offset(skb);
844 struct iphdr *iph;
846 iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
848 if (!iph)
849 return NULL;
851 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
852 return skb_header_pointer(skb, offset + iph->ihl * 4,
853 sizeof(struct ipv6hdr), buf);
855 else if (iph->version == 4)
856 return iph;
858 else if (iph->version == 6)
859 return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
860 buf);
862 return NULL;
865 static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
866 void *buf, unsigned int bufsize)
868 unsigned int offset = skb_network_offset(skb);
869 const struct ipv6hdr *ipv6h;
870 const struct tcphdr *tcph;
871 const struct iphdr *iph;
872 struct ipv6hdr _ipv6h;
873 struct tcphdr _tcph;
875 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
877 if (!ipv6h)
878 return NULL;
880 if (ipv6h->version == 4) {
881 iph = (struct iphdr *)ipv6h;
882 offset += iph->ihl * 4;
884 /* special-case 6in4 tunnelling, as that is a common way to get
885 * v6 connectivity in the home
887 if (iph->protocol == IPPROTO_IPV6) {
888 ipv6h = skb_header_pointer(skb, offset,
889 sizeof(_ipv6h), &_ipv6h);
891 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
892 return NULL;
894 offset += sizeof(struct ipv6hdr);
896 } else if (iph->protocol != IPPROTO_TCP) {
897 return NULL;
900 } else if (ipv6h->version == 6) {
901 if (ipv6h->nexthdr != IPPROTO_TCP)
902 return NULL;
904 offset += sizeof(struct ipv6hdr);
905 } else {
906 return NULL;
909 tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
910 if (!tcph)
911 return NULL;
913 return skb_header_pointer(skb, offset,
914 min(__tcp_hdrlen(tcph), bufsize), buf);
917 static const void *cake_get_tcpopt(const struct tcphdr *tcph,
918 int code, int *oplen)
920 /* inspired by tcp_parse_options in tcp_input.c */
921 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
922 const u8 *ptr = (const u8 *)(tcph + 1);
924 while (length > 0) {
925 int opcode = *ptr++;
926 int opsize;
928 if (opcode == TCPOPT_EOL)
929 break;
930 if (opcode == TCPOPT_NOP) {
931 length--;
932 continue;
934 opsize = *ptr++;
935 if (opsize < 2 || opsize > length)
936 break;
938 if (opcode == code) {
939 *oplen = opsize;
940 return ptr;
943 ptr += opsize - 2;
944 length -= opsize;
947 return NULL;
950 /* Compare two SACK sequences. A sequence is considered greater if it SACKs more
951 * bytes than the other. In the case where both sequences ACK bytes that the
952 * other doesn't, A is considered greater. DSACKs in A also make A be
953 * considered greater.
955 * @return -1, 0 or 1 as normal compare functions
957 static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
958 const struct tcphdr *tcph_b)
960 const struct tcp_sack_block_wire *sack_a, *sack_b;
961 u32 ack_seq_a = ntohl(tcph_a->ack_seq);
962 u32 bytes_a = 0, bytes_b = 0;
963 int oplen_a, oplen_b;
964 bool first = true;
966 sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
967 sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
969 /* pointers point to option contents */
970 oplen_a -= TCPOLEN_SACK_BASE;
971 oplen_b -= TCPOLEN_SACK_BASE;
973 if (sack_a && oplen_a >= sizeof(*sack_a) &&
974 (!sack_b || oplen_b < sizeof(*sack_b)))
975 return -1;
976 else if (sack_b && oplen_b >= sizeof(*sack_b) &&
977 (!sack_a || oplen_a < sizeof(*sack_a)))
978 return 1;
979 else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
980 (!sack_b || oplen_b < sizeof(*sack_b)))
981 return 0;
983 while (oplen_a >= sizeof(*sack_a)) {
984 const struct tcp_sack_block_wire *sack_tmp = sack_b;
985 u32 start_a = get_unaligned_be32(&sack_a->start_seq);
986 u32 end_a = get_unaligned_be32(&sack_a->end_seq);
987 int oplen_tmp = oplen_b;
988 bool found = false;
990 /* DSACK; always considered greater to prevent dropping */
991 if (before(start_a, ack_seq_a))
992 return -1;
994 bytes_a += end_a - start_a;
996 while (oplen_tmp >= sizeof(*sack_tmp)) {
997 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
998 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
1000 /* first time through we count the total size */
1001 if (first)
1002 bytes_b += end_b - start_b;
1004 if (!after(start_b, start_a) && !before(end_b, end_a)) {
1005 found = true;
1006 if (!first)
1007 break;
1009 oplen_tmp -= sizeof(*sack_tmp);
1010 sack_tmp++;
1013 if (!found)
1014 return -1;
1016 oplen_a -= sizeof(*sack_a);
1017 sack_a++;
1018 first = false;
1021 /* If we made it this far, all ranges SACKed by A are covered by B, so
1022 * either the SACKs are equal, or B SACKs more bytes.
1024 return bytes_b > bytes_a ? 1 : 0;
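/* For illustration: if A SACKs only 1000-2000 while B SACKs 1000-3000, every
 * range in A is covered by B and B covers more bytes, so the function above
 * returns 1; if A instead SACKed a range that B does not cover at all, it
 * would return -1.
 */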
1027 static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
1028 u32 *tsval, u32 *tsecr)
1030 const u8 *ptr;
1031 int opsize;
1033 ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
1035 if (ptr && opsize == TCPOLEN_TIMESTAMP) {
1036 *tsval = get_unaligned_be32(ptr);
1037 *tsecr = get_unaligned_be32(ptr + 4);
1041 static bool cake_tcph_may_drop(const struct tcphdr *tcph,
1042 u32 tstamp_new, u32 tsecr_new)
1044 /* inspired by tcp_parse_options in tcp_input.c */
1045 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
1046 const u8 *ptr = (const u8 *)(tcph + 1);
1047 u32 tstamp, tsecr;
1049 /* 3 reserved flags must be unset to avoid future breakage
1050 * ACK must be set
1051 * ECE/CWR are handled separately
1052 * All other flags URG/PSH/RST/SYN/FIN must be unset
1053 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
1054 * 0x00C00000 = CWR/ECE (handled separately)
1055 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
1057 if (((tcp_flag_word(tcph) &
1058 cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
1059 return false;
1061 while (length > 0) {
1062 int opcode = *ptr++;
1063 int opsize;
1065 if (opcode == TCPOPT_EOL)
1066 break;
1067 if (opcode == TCPOPT_NOP) {
1068 length--;
1069 continue;
1071 opsize = *ptr++;
1072 if (opsize < 2 || opsize > length)
1073 break;
1075 switch (opcode) {
1076 case TCPOPT_MD5SIG: /* doesn't influence state */
1077 break;
1079 case TCPOPT_SACK: /* stricter checking performed later */
1080 if (opsize % 8 != 2)
1081 return false;
1082 break;
1084 case TCPOPT_TIMESTAMP:
1085 /* only drop timestamps lower than new */
1086 if (opsize != TCPOLEN_TIMESTAMP)
1087 return false;
1088 tstamp = get_unaligned_be32(ptr);
1089 tsecr = get_unaligned_be32(ptr + 4);
1090 if (after(tstamp, tstamp_new) ||
1091 after(tsecr, tsecr_new))
1092 return false;
1093 break;
1095 case TCPOPT_MSS: /* these should only be set on SYN */
1096 case TCPOPT_WINDOW:
1097 case TCPOPT_SACK_PERM:
1098 case TCPOPT_FASTOPEN:
1099 case TCPOPT_EXP:
1100 default: /* don't drop if any unknown options are present */
1101 return false;
1104 ptr += opsize - 2;
1105 length -= opsize;
1108 return true;
1111 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
1112 struct cake_flow *flow)
1114 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
1115 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
1116 struct sk_buff *skb_check, *skb_prev = NULL;
1117 const struct ipv6hdr *ipv6h, *ipv6h_check;
1118 unsigned char _tcph[64], _tcph_check[64];
1119 const struct tcphdr *tcph, *tcph_check;
1120 const struct iphdr *iph, *iph_check;
1121 struct ipv6hdr _iph, _iph_check;
1122 const struct sk_buff *skb;
1123 int seglen, num_found = 0;
1124 u32 tstamp = 0, tsecr = 0;
1125 __be32 elig_flags = 0;
1126 int sack_comp;
1128 /* no other possible ACKs to filter */
1129 if (flow->head == flow->tail)
1130 return NULL;
1132 skb = flow->tail;
1133 tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
1134 iph = cake_get_iphdr(skb, &_iph);
1135 if (!tcph)
1136 return NULL;
1138 cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
1140 /* the 'triggering' packet need only have the ACK flag set.
1141 * also check that SYN is not set, as there won't be any previous ACKs.
1143 if ((tcp_flag_word(tcph) &
1144 (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
1145 return NULL;
1147 /* the 'triggering' ACK is at the tail of the queue, we have already
1148 * returned if it is the only packet in the flow. loop through the rest
1149 * of the queue looking for pure ACKs with the same 5-tuple as the
1150 * triggering one.
1152 for (skb_check = flow->head;
1153 skb_check && skb_check != skb;
1154 skb_prev = skb_check, skb_check = skb_check->next) {
1155 iph_check = cake_get_iphdr(skb_check, &_iph_check);
1156 tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
1157 sizeof(_tcph_check));
1159 /* only TCP packets with matching 5-tuple are eligible, and only
1160 * drop safe headers
1162 if (!tcph_check || iph->version != iph_check->version ||
1163 tcph_check->source != tcph->source ||
1164 tcph_check->dest != tcph->dest)
1165 continue;
1167 if (iph_check->version == 4) {
1168 if (iph_check->saddr != iph->saddr ||
1169 iph_check->daddr != iph->daddr)
1170 continue;
1172 seglen = ntohs(iph_check->tot_len) -
1173 (4 * iph_check->ihl);
1174 } else if (iph_check->version == 6) {
1175 ipv6h = (struct ipv6hdr *)iph;
1176 ipv6h_check = (struct ipv6hdr *)iph_check;
1178 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
1179 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
1180 continue;
1182 seglen = ntohs(ipv6h_check->payload_len);
1183 } else {
1184 WARN_ON(1); /* shouldn't happen */
1185 continue;
1188 /* If the ECE/CWR flags changed from the previous eligible
1189 * packet in the same flow, we should no longer be dropping that
1190 * previous packet as this would lose information.
1192 if (elig_ack && (tcp_flag_word(tcph_check) &
1193 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
1194 elig_ack = NULL;
1195 elig_ack_prev = NULL;
1196 num_found--;
1199 /* Check TCP options and flags, don't drop ACKs with segment
1200 * data, and don't drop ACKs with a higher cumulative ACK
1201 * counter than the triggering packet. Check ACK seqno here to
1202 * avoid parsing SACK options of packets we are going to exclude
1203 * anyway.
1205 if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
1206 (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
1207 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
1208 continue;
1210 /* Check SACK options. The triggering packet must SACK more data
1211 * than the ACK under consideration, or SACK the same range but
1212 * have a larger cumulative ACK counter. The latter is a
1213 * pathological case, but is contained in the following check
1214 * anyway, just to be safe.
1216 sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
1218 if (sack_comp < 0 ||
1219 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
1220 sack_comp == 0))
1221 continue;
1223 /* At this point we have found an eligible pure ACK to drop; if
1224 * we are in aggressive mode, we are done. Otherwise, keep
1225 * searching unless this is the second eligible ACK we
1226 * found.
1228 * Since we want to drop ACK closest to the head of the queue,
1229 * save the first eligible ACK we find, even if we need to loop
1230 * again.
1232 if (!elig_ack) {
1233 elig_ack = skb_check;
1234 elig_ack_prev = skb_prev;
1235 elig_flags = (tcp_flag_word(tcph_check)
1236 & (TCP_FLAG_ECE | TCP_FLAG_CWR));
1239 if (num_found++ > 0)
1240 goto found;
1243 /* We made it through the queue without finding two eligible ACKs. If
1244 * we found a single eligible ACK we can drop it in aggressive mode if
1245 * we can guarantee that this does not interfere with ECN flag
1246 * information. We ensure this by dropping it only if the enqueued
1247 * packet is consecutive with the eligible ACK, and their flags match.
1249 if (elig_ack && aggressive && elig_ack->next == skb &&
1250 (elig_flags == (tcp_flag_word(tcph) &
1251 (TCP_FLAG_ECE | TCP_FLAG_CWR))))
1252 goto found;
1254 return NULL;
1256 found:
1257 if (elig_ack_prev)
1258 elig_ack_prev->next = elig_ack->next;
1259 else
1260 flow->head = elig_ack->next;
1262 skb_mark_not_on_list(elig_ack);
1264 return elig_ack;
1267 static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
1269 avg -= avg >> shift;
1270 avg += sample >> shift;
1271 return avg;
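/* For illustration: cake_ewma() computes avg * (1 - 2^-shift) + sample *
 * 2^-shift, so shift == 8 weights a new sample at 1/256 (slow tracking) while
 * shift == 2 weights it at 1/4 (fast tracking); callers pick the faster gain
 * when the sample moves in the direction they want to react to quickly.
 */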
1274 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
1276 if (q->rate_flags & CAKE_FLAG_OVERHEAD)
1277 len -= off;
1279 if (q->max_netlen < len)
1280 q->max_netlen = len;
1281 if (q->min_netlen > len)
1282 q->min_netlen = len;
1284 len += q->rate_overhead;
1286 if (len < q->rate_mpu)
1287 len = q->rate_mpu;
1289 if (q->atm_mode == CAKE_ATM_ATM) {
1290 len += 47;
1291 len /= 48;
1292 len *= 53;
1293 } else if (q->atm_mode == CAKE_ATM_PTM) {
1294 /* Add one byte per 64 bytes or part thereof.
1295 * This is conservative and easier to calculate than the
1296 * precise value.
1298 len += (len + 63) / 64;
1301 if (q->max_adjlen < len)
1302 q->max_adjlen = len;
1303 if (q->min_adjlen > len)
1304 q->min_adjlen = len;
1306 return len;
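/* Worked example: with atm_mode == CAKE_ATM_ATM, an adjusted length of 1000
 * bytes becomes (1000 + 47) / 48 == 21 cells and is charged as 21 * 53 ==
 * 1113 bytes on the wire; with CAKE_ATM_PTM the same packet is charged
 * 1000 + 1063/64 == 1016 bytes to cover the 64b/65b encoding.
 */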
1309 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
1311 const struct skb_shared_info *shinfo = skb_shinfo(skb);
1312 unsigned int hdr_len, last_len = 0;
1313 u32 off = skb_network_offset(skb);
1314 u32 len = qdisc_pkt_len(skb);
1315 u16 segs = 1;
1317 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
1319 if (!shinfo->gso_size)
1320 return cake_calc_overhead(q, len, off);
1322 /* borrowed from qdisc_pkt_len_init() */
1323 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1325 /* + transport layer */
1326 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
1327 SKB_GSO_TCPV6))) {
1328 const struct tcphdr *th;
1329 struct tcphdr _tcphdr;
1331 th = skb_header_pointer(skb, skb_transport_offset(skb),
1332 sizeof(_tcphdr), &_tcphdr);
1333 if (likely(th))
1334 hdr_len += __tcp_hdrlen(th);
1335 } else {
1336 struct udphdr _udphdr;
1338 if (skb_header_pointer(skb, skb_transport_offset(skb),
1339 sizeof(_udphdr), &_udphdr))
1340 hdr_len += sizeof(struct udphdr);
1343 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
1344 segs = DIV_ROUND_UP(skb->len - hdr_len,
1345 shinfo->gso_size);
1346 else
1347 segs = shinfo->gso_segs;
1349 len = shinfo->gso_size + hdr_len;
1350 last_len = skb->len - shinfo->gso_size * (segs - 1);
1352 return (cake_calc_overhead(q, len, off) * (segs - 1) +
1353 cake_calc_overhead(q, last_len, off));
1356 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
1358 struct cake_heap_entry ii = q->overflow_heap[i];
1359 struct cake_heap_entry jj = q->overflow_heap[j];
1361 q->overflow_heap[i] = jj;
1362 q->overflow_heap[j] = ii;
1364 q->tins[ii.t].overflow_idx[ii.b] = j;
1365 q->tins[jj.t].overflow_idx[jj.b] = i;
1368 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
1370 struct cake_heap_entry ii = q->overflow_heap[i];
1372 return q->tins[ii.t].backlogs[ii.b];
1375 static void cake_heapify(struct cake_sched_data *q, u16 i)
1377 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
1378 u32 mb = cake_heap_get_backlog(q, i);
1379 u32 m = i;
1381 while (m < a) {
1382 u32 l = m + m + 1;
1383 u32 r = l + 1;
1385 if (l < a) {
1386 u32 lb = cake_heap_get_backlog(q, l);
1388 if (lb > mb) {
1389 m = l;
1390 mb = lb;
1394 if (r < a) {
1395 u32 rb = cake_heap_get_backlog(q, r);
1397 if (rb > mb) {
1398 m = r;
1399 mb = rb;
1403 if (m != i) {
1404 cake_heap_swap(q, i, m);
1405 i = m;
1406 } else {
1407 break;
1412 static void cake_heapify_up(struct cake_sched_data *q, u16 i)
1414 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
1415 u16 p = (i - 1) >> 1;
1416 u32 ib = cake_heap_get_backlog(q, i);
1417 u32 pb = cake_heap_get_backlog(q, p);
1419 if (ib > pb) {
1420 cake_heap_swap(q, i, p);
1421 i = p;
1422 } else {
1423 break;
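/* For illustration: the overflow heap is an implicit binary max-heap keyed on
 * per-queue backlog; node i's children are 2i+1 and 2i+2 and its parent is
 * (i-1)/2, so heap entry 0 names the (tin, flow) pair with the largest
 * backlog, which cake_drop() below prunes first.
 */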
1428 static int cake_advance_shaper(struct cake_sched_data *q,
1429 struct cake_tin_data *b,
1430 struct sk_buff *skb,
1431 ktime_t now, bool drop)
1433 u32 len = get_cobalt_cb(skb)->adjusted_len;
1435 /* charge packet bandwidth to this tin
1436 * and to the global shaper.
1438 if (q->rate_ns) {
1439 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1440 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1441 u64 failsafe_dur = global_dur + (global_dur >> 1);
1443 if (ktime_before(b->time_next_packet, now))
1444 b->time_next_packet = ktime_add_ns(b->time_next_packet,
1445 tin_dur);
1447 else if (ktime_before(b->time_next_packet,
1448 ktime_add_ns(now, tin_dur)))
1449 b->time_next_packet = ktime_add_ns(now, tin_dur);
1451 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1452 global_dur);
1453 if (!drop)
1454 q->failsafe_next_packet = \
1455 ktime_add_ns(q->failsafe_next_packet,
1456 failsafe_dur);
1458 return len;
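/* For illustration: tin_rate_ns/tin_rate_shft encode nanoseconds-per-byte as
 * a fixed-point pair, so at 100 Mbit/s a 1500 byte packet advances the tin's
 * schedule by roughly 1500 * 8 / 100e6 s == 120 us; the failsafe schedule is
 * allowed 1.5x that (global_dur + global_dur/2 above).
 */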
1461 static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1463 struct cake_sched_data *q = qdisc_priv(sch);
1464 ktime_t now = ktime_get();
1465 u32 idx = 0, tin = 0, len;
1466 struct cake_heap_entry qq;
1467 struct cake_tin_data *b;
1468 struct cake_flow *flow;
1469 struct sk_buff *skb;
1471 if (!q->overflow_timeout) {
1472 int i;
1473 /* Build fresh max-heap */
1474 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1475 cake_heapify(q, i);
1477 q->overflow_timeout = 65535;
1479 /* select longest queue for pruning */
1480 qq = q->overflow_heap[0];
1481 tin = qq.t;
1482 idx = qq.b;
1484 b = &q->tins[tin];
1485 flow = &b->flows[idx];
1486 skb = dequeue_head(flow);
1487 if (unlikely(!skb)) {
1488 /* heap has gone wrong, rebuild it next time */
1489 q->overflow_timeout = 0;
1490 return idx + (tin << 16);
1493 if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1494 b->unresponsive_flow_count++;
1496 len = qdisc_pkt_len(skb);
1497 q->buffer_used -= skb->truesize;
1498 b->backlogs[idx] -= len;
1499 b->tin_backlog -= len;
1500 sch->qstats.backlog -= len;
1501 qdisc_tree_reduce_backlog(sch, 1, len);
1503 flow->dropped++;
1504 b->tin_dropped++;
1505 sch->qstats.drops++;
1507 if (q->rate_flags & CAKE_FLAG_INGRESS)
1508 cake_advance_shaper(q, b, skb, now, true);
1510 __qdisc_drop(skb, to_free);
1511 sch->q.qlen--;
1513 cake_heapify(q, 0);
1515 return idx + (tin << 16);
1518 static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
1520 const int offset = skb_network_offset(skb);
1521 u16 *buf, buf_;
1522 u8 dscp;
1524 switch (skb_protocol(skb, true)) {
1525 case htons(ETH_P_IP):
1526 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1527 if (unlikely(!buf))
1528 return 0;
1530 /* ToS is in the second byte of iphdr */
1531 dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
1533 if (wash && dscp) {
1534 const int wlen = offset + sizeof(struct iphdr);
1536 if (!pskb_may_pull(skb, wlen) ||
1537 skb_try_make_writable(skb, wlen))
1538 return 0;
1540 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1543 return dscp;
1545 case htons(ETH_P_IPV6):
1546 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1547 if (unlikely(!buf))
1548 return 0;
1550 /* Traffic class is in the first and second bytes of ipv6hdr */
1551 dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
1553 if (wash && dscp) {
1554 const int wlen = offset + sizeof(struct ipv6hdr);
1556 if (!pskb_may_pull(skb, wlen) ||
1557 skb_try_make_writable(skb, wlen))
1558 return 0;
1560 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1563 return dscp;
1565 case htons(ETH_P_ARP):
1566 return 0x38; /* CS7 - Net Control */
1568 default:
1569 /* If there is no Diffserv field, treat as best-effort */
1570 return 0;
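/* For illustration: the dsfield is the whole ToS / traffic-class octet, so an
 * EF-marked IPv4 packet with ToS 0xb8 yields dscp == 0xb8 >> 2 == 0x2e (46);
 * when "wash" is set, the ECN bits are preserved but the DSCP bits are zeroed
 * before the packet is passed on.
 */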
1574 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1575 struct sk_buff *skb)
1577 struct cake_sched_data *q = qdisc_priv(sch);
1578 u32 tin, mark;
1579 bool wash;
1580 u8 dscp;
1582 /* Tin selection: Default to diffserv-based selection, allow overriding
1583 * using firewall marks or skb->priority. Call DSCP parsing early if
1584 * wash is enabled, otherwise defer to below to skip unneeded parsing.
1586 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
1587 wash = !!(q->rate_flags & CAKE_FLAG_WASH);
1588 if (wash)
1589 dscp = cake_handle_diffserv(skb, wash);
1591 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
1592 tin = 0;
1594 else if (mark && mark <= q->tin_cnt)
1595 tin = q->tin_order[mark - 1];
1597 else if (TC_H_MAJ(skb->priority) == sch->handle &&
1598 TC_H_MIN(skb->priority) > 0 &&
1599 TC_H_MIN(skb->priority) <= q->tin_cnt)
1600 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
1602 else {
1603 if (!wash)
1604 dscp = cake_handle_diffserv(skb, wash);
1605 tin = q->tin_index[dscp];
1607 if (unlikely(tin >= q->tin_cnt))
1608 tin = 0;
1611 return &q->tins[tin];
1614 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1615 struct sk_buff *skb, int flow_mode, int *qerr)
1617 struct cake_sched_data *q = qdisc_priv(sch);
1618 struct tcf_proto *filter;
1619 struct tcf_result res;
1620 u16 flow = 0, host = 0;
1621 int result;
1623 filter = rcu_dereference_bh(q->filter_list);
1624 if (!filter)
1625 goto hash;
1627 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1628 result = tcf_classify(skb, filter, &res, false);
1630 if (result >= 0) {
1631 #ifdef CONFIG_NET_CLS_ACT
1632 switch (result) {
1633 case TC_ACT_STOLEN:
1634 case TC_ACT_QUEUED:
1635 case TC_ACT_TRAP:
1636 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1637 /* fall through */
1638 case TC_ACT_SHOT:
1639 return 0;
1641 #endif
1642 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1643 flow = TC_H_MIN(res.classid);
1644 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1645 host = TC_H_MAJ(res.classid) >> 16;
1647 hash:
1648 *t = cake_select_tin(sch, skb);
1649 return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1652 static void cake_reconfigure(struct Qdisc *sch);
1654 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1655 struct sk_buff **to_free)
1657 struct cake_sched_data *q = qdisc_priv(sch);
1658 int len = qdisc_pkt_len(skb);
1659 int uninitialized_var(ret);
1660 struct sk_buff *ack = NULL;
1661 ktime_t now = ktime_get();
1662 struct cake_tin_data *b;
1663 struct cake_flow *flow;
1664 u32 idx;
1666 /* choose flow to insert into */
1667 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
1668 if (idx == 0) {
1669 if (ret & __NET_XMIT_BYPASS)
1670 qdisc_qstats_drop(sch);
1671 __qdisc_drop(skb, to_free);
1672 return ret;
1674 idx--;
1675 flow = &b->flows[idx];
1677 /* ensure shaper state isn't stale */
1678 if (!b->tin_backlog) {
1679 if (ktime_before(b->time_next_packet, now))
1680 b->time_next_packet = now;
1682 if (!sch->q.qlen) {
1683 if (ktime_before(q->time_next_packet, now)) {
1684 q->failsafe_next_packet = now;
1685 q->time_next_packet = now;
1686 } else if (ktime_after(q->time_next_packet, now) &&
1687 ktime_after(q->failsafe_next_packet, now)) {
1688 u64 next = \
1689 min(ktime_to_ns(q->time_next_packet),
1690 ktime_to_ns(
1691 q->failsafe_next_packet));
1692 sch->qstats.overlimits++;
1693 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1698 if (unlikely(len > b->max_skblen))
1699 b->max_skblen = len;
1701 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1702 struct sk_buff *segs, *nskb;
1703 netdev_features_t features = netif_skb_features(skb);
1704 unsigned int slen = 0, numsegs = 0;
1706 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1707 if (IS_ERR_OR_NULL(segs))
1708 return qdisc_drop(skb, sch, to_free);
1710 while (segs) {
1711 nskb = segs->next;
1712 skb_mark_not_on_list(segs);
1713 qdisc_skb_cb(segs)->pkt_len = segs->len;
1714 cobalt_set_enqueue_time(segs, now);
1715 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
1716 segs);
1717 flow_queue_add(flow, segs);
1719 sch->q.qlen++;
1720 numsegs++;
1721 slen += segs->len;
1722 q->buffer_used += segs->truesize;
1723 b->packets++;
1724 segs = nskb;
1727 /* stats */
1728 b->bytes += slen;
1729 b->backlogs[idx] += slen;
1730 b->tin_backlog += slen;
1731 sch->qstats.backlog += slen;
1732 q->avg_window_bytes += slen;
1734 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1735 consume_skb(skb);
1736 } else {
1737 /* not splitting */
1738 cobalt_set_enqueue_time(skb, now);
1739 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
1740 flow_queue_add(flow, skb);
1742 if (q->ack_filter)
1743 ack = cake_ack_filter(q, flow);
1745 if (ack) {
1746 b->ack_drops++;
1747 sch->qstats.drops++;
1748 b->bytes += qdisc_pkt_len(ack);
1749 len -= qdisc_pkt_len(ack);
1750 q->buffer_used += skb->truesize - ack->truesize;
1751 if (q->rate_flags & CAKE_FLAG_INGRESS)
1752 cake_advance_shaper(q, b, ack, now, true);
1754 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1755 consume_skb(ack);
1756 } else {
1757 sch->q.qlen++;
1758 q->buffer_used += skb->truesize;
1761 /* stats */
1762 b->packets++;
1763 b->bytes += len;
1764 b->backlogs[idx] += len;
1765 b->tin_backlog += len;
1766 sch->qstats.backlog += len;
1767 q->avg_window_bytes += len;
1770 if (q->overflow_timeout)
1771 cake_heapify_up(q, b->overflow_idx[idx]);
1773 /* incoming bandwidth capacity estimate */
1774 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1775 u64 packet_interval = \
1776 ktime_to_ns(ktime_sub(now, q->last_packet_time));
1778 if (packet_interval > NSEC_PER_SEC)
1779 packet_interval = NSEC_PER_SEC;
1781 /* filter out short-term bursts, eg. wifi aggregation */
1782 q->avg_packet_interval = \
1783 cake_ewma(q->avg_packet_interval,
1784 packet_interval,
1785 (packet_interval > q->avg_packet_interval ?
1786 2 : 8));
1788 q->last_packet_time = now;
1790 if (packet_interval > q->avg_packet_interval) {
1791 u64 window_interval = \
1792 ktime_to_ns(ktime_sub(now,
1793 q->avg_window_begin));
1794 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1796 b = div64_u64(b, window_interval);
1797 q->avg_peak_bandwidth =
1798 cake_ewma(q->avg_peak_bandwidth, b,
1799 b > q->avg_peak_bandwidth ? 2 : 8);
1800 q->avg_window_bytes = 0;
1801 q->avg_window_begin = now;
1803 if (ktime_after(now,
1804 ktime_add_ms(q->last_reconfig_time,
1805 250))) {
1806 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1807 cake_reconfigure(sch);
1810 } else {
1811 q->avg_window_bytes = 0;
1812 q->last_packet_time = now;
1815 /* flowchain */
1816 if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1817 struct cake_host *srchost = &b->hosts[flow->srchost];
1818 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1819 u16 host_load = 1;
1821 if (!flow->set) {
1822 list_add_tail(&flow->flowchain, &b->new_flows);
1823 } else {
1824 b->decaying_flow_count--;
1825 list_move_tail(&flow->flowchain, &b->new_flows);
1827 flow->set = CAKE_SET_SPARSE;
1828 b->sparse_flow_count++;
1830 if (cake_dsrc(q->flow_mode))
1831 host_load = max(host_load, srchost->srchost_bulk_flow_count);
1833 if (cake_ddst(q->flow_mode))
1834 host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
1836 flow->deficit = (b->flow_quantum *
1837 quantum_div[host_load]) >> 16;
1838 } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1839 struct cake_host *srchost = &b->hosts[flow->srchost];
1840 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1842 /* this flow was empty, accounted as a sparse flow, but actually
1843 * in the bulk rotation.
1845 flow->set = CAKE_SET_BULK;
1846 b->sparse_flow_count--;
1847 b->bulk_flow_count++;
1849 if (cake_dsrc(q->flow_mode))
1850 srchost->srchost_bulk_flow_count++;
1852 if (cake_ddst(q->flow_mode))
1853 dsthost->dsthost_bulk_flow_count++;
1857 if (q->buffer_used > q->buffer_max_used)
1858 q->buffer_max_used = q->buffer_used;
1860 if (q->buffer_used > q->buffer_limit) {
1861 u32 dropped = 0;
1863 while (q->buffer_used > q->buffer_limit) {
1864 dropped++;
1865 cake_drop(sch, to_free);
1867 b->drop_overlimit += dropped;
1869 return NET_XMIT_SUCCESS;
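/* For illustration: with CAKE_FLAG_AUTORATE_INGRESS the shaper rate tracks an
 * EWMA of the observed peak arrival rate, and each reconfiguration (at most
 * once per 250 ms) sets the shaper to 15/16 (~94%) of that estimate, keeping
 * the bottleneck queue in this qdisc rather than downstream.
 */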
1872 static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1874 struct cake_sched_data *q = qdisc_priv(sch);
1875 struct cake_tin_data *b = &q->tins[q->cur_tin];
1876 struct cake_flow *flow = &b->flows[q->cur_flow];
1877 struct sk_buff *skb = NULL;
1878 u32 len;
1880 if (flow->head) {
1881 skb = dequeue_head(flow);
1882 len = qdisc_pkt_len(skb);
1883 b->backlogs[q->cur_flow] -= len;
1884 b->tin_backlog -= len;
1885 sch->qstats.backlog -= len;
1886 q->buffer_used -= skb->truesize;
1887 sch->q.qlen--;
1889 if (q->overflow_timeout)
1890 cake_heapify(q, b->overflow_idx[q->cur_flow]);
1892 return skb;
1895 /* Discard leftover packets from a tin no longer in use. */
1896 static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1898 struct cake_sched_data *q = qdisc_priv(sch);
1899 struct sk_buff *skb;
1901 q->cur_tin = tin;
1902 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1903 while (!!(skb = cake_dequeue_one(sch)))
1904 kfree_skb(skb);
1907 static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1909 struct cake_sched_data *q = qdisc_priv(sch);
1910 struct cake_tin_data *b = &q->tins[q->cur_tin];
1911 struct cake_host *srchost, *dsthost;
1912 ktime_t now = ktime_get();
1913 struct cake_flow *flow;
1914 struct list_head *head;
1915 bool first_flow = true;
1916 struct sk_buff *skb;
1917 u16 host_load;
1918 u64 delay;
1919 u32 len;
1921 begin:
1922 if (!sch->q.qlen)
1923 return NULL;
1925 /* global hard shaper */
1926 if (ktime_after(q->time_next_packet, now) &&
1927 ktime_after(q->failsafe_next_packet, now)) {
1928 u64 next = min(ktime_to_ns(q->time_next_packet),
1929 ktime_to_ns(q->failsafe_next_packet));
1931 sch->qstats.overlimits++;
1932 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1933 return NULL;
1936 /* Choose a class to work on. */
1937 if (!q->rate_ns) {
1938 /* In unlimited mode, can't rely on shaper timings, just balance
1939 * with DRR
1941 bool wrapped = false, empty = true;
1943 while (b->tin_deficit < 0 ||
1944 !(b->sparse_flow_count + b->bulk_flow_count)) {
1945 if (b->tin_deficit <= 0)
1946 b->tin_deficit += b->tin_quantum_band;
1947 if (b->sparse_flow_count + b->bulk_flow_count)
1948 empty = false;
1950 q->cur_tin++;
1951 b++;
1952 if (q->cur_tin >= q->tin_cnt) {
1953 q->cur_tin = 0;
1954 b = q->tins;
1956 if (wrapped) {
1957 /* It's possible for q->qlen to be
1958 * nonzero when we actually have no
1959 * packets anywhere.
1961 if (empty)
1962 return NULL;
1963 } else {
1964 wrapped = true;
1968 } else {
1969 /* In shaped mode, choose:
1970 * - Highest-priority tin with queue and meeting schedule, or
1971 * - The earliest-scheduled tin with queue.
1973 ktime_t best_time = KTIME_MAX;
1974 int tin, best_tin = 0;
1976 for (tin = 0; tin < q->tin_cnt; tin++) {
1977 b = q->tins + tin;
1978 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
1979 ktime_t time_to_pkt = \
1980 ktime_sub(b->time_next_packet, now);
1982 if (ktime_to_ns(time_to_pkt) <= 0 ||
1983 ktime_compare(time_to_pkt,
1984 best_time) <= 0) {
1985 best_time = time_to_pkt;
1986 best_tin = tin;
1991 q->cur_tin = best_tin;
1992 b = q->tins + best_tin;
1994 /* No point in going further if no packets to deliver. */
1995 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
1996 return NULL;
1999 retry:
2000 /* service this class */
2001 head = &b->decaying_flows;
2002 if (!first_flow || list_empty(head)) {
2003 head = &b->new_flows;
2004 if (list_empty(head)) {
2005 head = &b->old_flows;
2006 if (unlikely(list_empty(head))) {
2007 head = &b->decaying_flows;
2008 if (unlikely(list_empty(head)))
2009 goto begin;
2013 flow = list_first_entry(head, struct cake_flow, flowchain);
2014 q->cur_flow = flow - b->flows;
2015 first_flow = false;
2017 /* triple isolation (modified DRR++) */
2018 srchost = &b->hosts[flow->srchost];
2019 dsthost = &b->hosts[flow->dsthost];
2020 host_load = 1;
2022 /* flow isolation (DRR++) */
2023 if (flow->deficit <= 0) {
2024 /* Keep all flows with deficits out of the sparse and decaying
2025 * rotations. No non-empty flow can go into the decaying
2026 * rotation, so they can't get deficits
2028 if (flow->set == CAKE_SET_SPARSE) {
2029 if (flow->head) {
2030 b->sparse_flow_count--;
2031 b->bulk_flow_count++;
2033 if (cake_dsrc(q->flow_mode))
2034 srchost->srchost_bulk_flow_count++;
2036 if (cake_ddst(q->flow_mode))
2037 dsthost->dsthost_bulk_flow_count++;
2039 flow->set = CAKE_SET_BULK;
2040 } else {
2041 /* we've moved it to the bulk rotation for
2042 * correct deficit accounting but we still want
2043 * to count it as a sparse flow, not a bulk one.
2045 flow->set = CAKE_SET_SPARSE_WAIT;
2049 if (cake_dsrc(q->flow_mode))
2050 host_load = max(host_load, srchost->srchost_bulk_flow_count);
2052 if (cake_ddst(q->flow_mode))
2053 host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
2055 WARN_ON(host_load > CAKE_QUEUES);
2057 /* The shifted prandom_u32() is a way to apply dithering to
2058 * avoid accumulating roundoff errors
2060 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
2061 (prandom_u32() >> 16)) >> 16;
2062 list_move_tail(&flow->flowchain, &b->old_flows);
2064 goto retry;
2067 /* Retrieve a packet via the AQM */
2068 while (1) {
2069 skb = cake_dequeue_one(sch);
2070 if (!skb) {
2071 /* this queue was actually empty */
2072 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
2073 b->unresponsive_flow_count--;
2075 if (flow->cvars.p_drop || flow->cvars.count ||
2076 ktime_before(now, flow->cvars.drop_next)) {
2077 /* keep in the flowchain until the state has
2078 * decayed to rest
2080 list_move_tail(&flow->flowchain,
2081 &b->decaying_flows);
2082 if (flow->set == CAKE_SET_BULK) {
2083 b->bulk_flow_count--;
2085 if (cake_dsrc(q->flow_mode))
2086 srchost->srchost_bulk_flow_count--;
2088 if (cake_ddst(q->flow_mode))
2089 dsthost->dsthost_bulk_flow_count--;
2091 b->decaying_flow_count++;
2092 } else if (flow->set == CAKE_SET_SPARSE ||
2093 flow->set == CAKE_SET_SPARSE_WAIT) {
2094 b->sparse_flow_count--;
2095 b->decaying_flow_count++;
2096 }
2097 flow->set = CAKE_SET_DECAYING;
2098 } else {
2099 /* remove empty queue from the flowchain */
2100 list_del_init(&flow->flowchain);
2101 if (flow->set == CAKE_SET_SPARSE ||
2102 flow->set == CAKE_SET_SPARSE_WAIT)
2103 b->sparse_flow_count--;
2104 else if (flow->set == CAKE_SET_BULK) {
2105 b->bulk_flow_count--;
2107 if (cake_dsrc(q->flow_mode))
2108 srchost->srchost_bulk_flow_count--;
2110 if (cake_ddst(q->flow_mode))
2111 dsthost->dsthost_bulk_flow_count--;
2113 } else
2114 b->decaying_flow_count--;
2116 flow->set = CAKE_SET_NONE;
2117 }
2118 goto begin;
2119 }
2121 /* Last packet in queue may be marked, shouldn't be dropped */
2122 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
2123 (b->bulk_flow_count *
2124 !!(q->rate_flags &
2125 CAKE_FLAG_INGRESS))) ||
2126 !flow->head)
2127 break;
2129 /* drop this packet, get another one */
2130 if (q->rate_flags & CAKE_FLAG_INGRESS) {
2131 len = cake_advance_shaper(q, b, skb,
2132 now, true);
2133 flow->deficit -= len;
2134 b->tin_deficit -= len;
2135 }
2136 flow->dropped++;
2137 b->tin_dropped++;
2138 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2139 qdisc_qstats_drop(sch);
2140 kfree_skb(skb);
2141 if (q->rate_flags & CAKE_FLAG_INGRESS)
2142 goto retry;
2143 }
2145 b->tin_ecn_mark += !!flow->cvars.ecn_marked;
2146 qdisc_bstats_update(sch, skb);
2148 /* collect delay stats */
2149 delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
2150 b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
2151 b->peak_delay = cake_ewma(b->peak_delay, delay,
2152 delay > b->peak_delay ? 2 : 8);
2153 b->base_delay = cake_ewma(b->base_delay, delay,
2154 delay < b->base_delay ? 2 : 8);
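/* Editor's note: cake_ewma(avg, sample, shift), defined earlier in this
 * file, computes approximately avg += (sample - avg) / 2^shift.  The peak
 * delay therefore tracks upward quickly (shift 2, weight 1/4) and decays
 * slowly (shift 8, weight 1/256), while the base delay does the reverse,
 * following the lowest recently observed sojourn times.
 */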
2156 len = cake_advance_shaper(q, b, skb, now, false);
2157 flow->deficit -= len;
2158 b->tin_deficit -= len;
2160 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
2161 u64 next = min(ktime_to_ns(q->time_next_packet),
2162 ktime_to_ns(q->failsafe_next_packet));
2164 qdisc_watchdog_schedule_ns(&q->watchdog, next);
2165 } else if (!sch->q.qlen) {
2166 int i;
2168 for (i = 0; i < q->tin_cnt; i++) {
2169 if (q->tins[i].decaying_flow_count) {
2170 ktime_t next = \
2171 ktime_add_ns(now,
2172 q->tins[i].cparams.target);
2174 qdisc_watchdog_schedule_ns(&q->watchdog,
2175 ktime_to_ns(next));
2176 break;
2177 }
2178 }
2179 }
2181 if (q->overflow_timeout)
2182 q->overflow_timeout--;
2184 return skb;
2185 }
2187 static void cake_reset(struct Qdisc *sch)
2188 {
2189 u32 c;
2191 for (c = 0; c < CAKE_MAX_TINS; c++)
2192 cake_clear_tin(sch, c);
2193 }
2195 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2196 [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
2197 [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
2198 [TCA_CAKE_ATM] = { .type = NLA_U32 },
2199 [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
2200 [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
2201 [TCA_CAKE_RTT] = { .type = NLA_U32 },
2202 [TCA_CAKE_TARGET] = { .type = NLA_U32 },
2203 [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
2204 [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
2205 [TCA_CAKE_NAT] = { .type = NLA_U32 },
2206 [TCA_CAKE_RAW] = { .type = NLA_U32 },
2207 [TCA_CAKE_WASH] = { .type = NLA_U32 },
2208 [TCA_CAKE_MPU] = { .type = NLA_U32 },
2209 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
2210 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
2211 [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 },
2212 [TCA_CAKE_FWMARK] = { .type = NLA_U32 },
2213 };
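/* Editor's note (illustrative, not part of the original source): a typical
 * userspace configuration exercising several of these attributes would be
 *   tc qdisc replace dev eth0 root cake bandwidth 100Mbit diffserv4 nat
 * where "bandwidth" maps to TCA_CAKE_BASE_RATE64, "diffserv4" to
 * TCA_CAKE_DIFFSERV_MODE and "nat" to TCA_CAKE_NAT; see tc-cake(8).
 */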
2215 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
2216 u64 target_ns, u64 rtt_est_ns)
2217 {
2218 /* convert byte-rate into time-per-byte
2219  * so it will always unwedge in reasonable time.
2220  */
2221 static const u64 MIN_RATE = 64;
2222 u32 byte_target = mtu;
2223 u64 byte_target_ns;
2224 u8 rate_shft = 0;
2225 u64 rate_ns = 0;
2227 b->flow_quantum = 1514;
2228 if (rate) {
2229 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
2230 rate_shft = 34;
2231 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
2232 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
2233 while (!!(rate_ns >> 34)) {
2234 rate_ns >>= 1;
2235 rate_shft--;
2236 }
2237 } /* else unlimited, ie. zero delay */
2239 b->tin_rate_bps = rate;
2240 b->tin_rate_ns = rate_ns;
2241 b->tin_rate_shft = rate_shft;
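/* Editor's note (worked example, added for exposition): rate_ns / 2^rate_shft
 * is the transmission cost in nanoseconds per byte; rate is taken as bytes
 * per second here, since byte_target below is a byte count.  At 12500000
 * bytes/s (100 Mbit/s) the cost is 80 ns/byte; rate_ns starts as 80 << 34
 * and is halved, with rate_shft decremented, until it fits below 2^34,
 * preserving the ratio.
 */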
2243 byte_target_ns = (byte_target * rate_ns) >> rate_shft;
2245 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
2246 b->cparams.interval = max(rtt_est_ns +
2247 b->cparams.target - target_ns,
2248 b->cparams.target * 2);
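/* Editor's note: at low rates the time to serialize one MTU-sized packet
 * (byte_target_ns) can exceed the configured AQM target, so the target is
 * floored at 1.5x that serialization time and the interval is stretched so
 * that it stays at least twice the target at any speed.
 */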
2249 b->cparams.mtu_time = byte_target_ns;
2250 b->cparams.p_inc = 1 << 24; /* 1/256 */
2251 b->cparams.p_dec = 1 << 20; /* 1/4096 */
2252 }
2254 static int cake_config_besteffort(struct Qdisc *sch)
2255 {
2256 struct cake_sched_data *q = qdisc_priv(sch);
2257 struct cake_tin_data *b = &q->tins[0];
2258 u32 mtu = psched_mtu(qdisc_dev(sch));
2259 u64 rate = q->rate_bps;
2261 q->tin_cnt = 1;
2263 q->tin_index = besteffort;
2264 q->tin_order = normal_order;
2266 cake_set_rate(b, rate, mtu,
2267 us_to_ns(q->target), us_to_ns(q->interval));
2268 b->tin_quantum_band = 65535;
2269 b->tin_quantum_prio = 65535;
2271 return 0;
2272 }
2274 static int cake_config_precedence(struct Qdisc *sch)
2275 {
2276 /* convert high-level (user visible) parameters into internal format */
2277 struct cake_sched_data *q = qdisc_priv(sch);
2278 u32 mtu = psched_mtu(qdisc_dev(sch));
2279 u64 rate = q->rate_bps;
2280 u32 quantum1 = 256;
2281 u32 quantum2 = 256;
2282 u32 i;
2284 q->tin_cnt = 8;
2285 q->tin_index = precedence;
2286 q->tin_order = normal_order;
2288 for (i = 0; i < q->tin_cnt; i++) {
2289 struct cake_tin_data *b = &q->tins[i];
2291 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2292 us_to_ns(q->interval));
2294 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2295 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2297 /* calculate next class's parameters */
2298 rate *= 7;
2299 rate >>= 3;
2301 quantum1 *= 3;
2302 quantum1 >>= 1;
2304 quantum2 *= 7;
2305 quantum2 >>= 3;
2306 }
2308 return 0;
2309 }
2311 /* List of known Diffserv codepoints:
2313 * Least Effort (CS1)
2314 * Best Effort (CS0)
2315 * Max Reliability & LLT "Lo" (TOS1)
2316 * Max Throughput (TOS2)
2317 * Min Delay (TOS4)
2318 * LLT "La" (TOS5)
2319 * Assured Forwarding 1 (AF1x) - x3
2320 * Assured Forwarding 2 (AF2x) - x3
2321 * Assured Forwarding 3 (AF3x) - x3
2322 * Assured Forwarding 4 (AF4x) - x3
2323 * Precedence Class 2 (CS2)
2324 * Precedence Class 3 (CS3)
2325 * Precedence Class 4 (CS4)
2326 * Precedence Class 5 (CS5)
2327 * Precedence Class 6 (CS6)
2328 * Precedence Class 7 (CS7)
2329 * Voice Admit (VA)
2330 * Expedited Forwarding (EF)
2332  * Total 25 codepoints.
2333  */
2335 /* List of traffic classes in RFC 4594:
2336 * (roughly descending order of contended priority)
2337 * (roughly ascending order of uncontended throughput)
2339 * Network Control (CS6,CS7) - routing traffic
2340 * Telephony (EF,VA) - aka. VoIP streams
2341 * Signalling (CS5) - VoIP setup
2342 * Multimedia Conferencing (AF4x) - aka. video calls
2343 * Realtime Interactive (CS4) - eg. games
2344 * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
2345 * Broadcast Video (CS3)
2346 * Low Latency Data (AF2x,TOS4) - eg. database
2347 * Ops, Admin, Management (CS2,TOS1) - eg. ssh
2348 * Standard Service (CS0 & unrecognised codepoints)
2349 * High Throughput Data (AF1x,TOS2) - eg. web traffic
2350 * Low Priority Data (CS1) - eg. BitTorrent
2352  * Total 12 traffic classes.
2353  */
2355 static int cake_config_diffserv8(struct Qdisc *sch)
2356 {
2357 /* Pruned list of traffic classes for typical applications:
2359 * Network Control (CS6, CS7)
2360 * Minimum Latency (EF, VA, CS5, CS4)
2361 * Interactive Shell (CS2, TOS1)
2362 * Low Latency Transactions (AF2x, TOS4)
2363 * Video Streaming (AF4x, AF3x, CS3)
2364 * Bog Standard (CS0 etc.)
2365 * High Throughput (AF1x, TOS2)
2366 * Background Traffic (CS1)
2368  * Total 8 traffic classes.
2369  */
2371 struct cake_sched_data *q = qdisc_priv(sch);
2372 u32 mtu = psched_mtu(qdisc_dev(sch));
2373 u64 rate = q->rate_bps;
2374 u32 quantum1 = 256;
2375 u32 quantum2 = 256;
2376 u32 i;
2378 q->tin_cnt = 8;
2380 /* codepoint to class mapping */
2381 q->tin_index = diffserv8;
2382 q->tin_order = normal_order;
2384 /* class characteristics */
2385 for (i = 0; i < q->tin_cnt; i++) {
2386 struct cake_tin_data *b = &q->tins[i];
2388 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2389 us_to_ns(q->interval));
2391 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2392 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2394 /* calculate next class's parameters */
2395 rate *= 7;
2396 rate >>= 3;
2398 quantum1 *= 3;
2399 quantum1 >>= 1;
2401 quantum2 *= 7;
2402 quantum2 >>= 3;
2403 }
2405 return 0;
2406 }
2408 static int cake_config_diffserv4(struct Qdisc *sch)
2409 {
2410 /* Further pruned list of traffic classes for four-class system:
2412 * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
2413 * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
2414 * Best Effort (CS0, AF1x, TOS2, and those not specified)
2415 * Background Traffic (CS1)
2417  * Total 4 traffic classes.
2418  */
2420 struct cake_sched_data *q = qdisc_priv(sch);
2421 u32 mtu = psched_mtu(qdisc_dev(sch));
2422 u64 rate = q->rate_bps;
2423 u32 quantum = 1024;
2425 q->tin_cnt = 4;
2427 /* codepoint to class mapping */
2428 q->tin_index = diffserv4;
2429 q->tin_order = bulk_order;
2431 /* class characteristics */
2432 cake_set_rate(&q->tins[0], rate, mtu,
2433 us_to_ns(q->target), us_to_ns(q->interval));
2434 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2435 us_to_ns(q->target), us_to_ns(q->interval));
2436 cake_set_rate(&q->tins[2], rate >> 1, mtu,
2437 us_to_ns(q->target), us_to_ns(q->interval));
2438 cake_set_rate(&q->tins[3], rate >> 2, mtu,
2439 us_to_ns(q->target), us_to_ns(q->interval));
2441 /* priority weights */
2442 q->tins[0].tin_quantum_prio = quantum;
2443 q->tins[1].tin_quantum_prio = quantum >> 4;
2444 q->tins[2].tin_quantum_prio = quantum << 2;
2445 q->tins[3].tin_quantum_prio = quantum << 4;
2447 /* bandwidth-sharing weights */
2448 q->tins[0].tin_quantum_band = quantum;
2449 q->tins[1].tin_quantum_band = quantum >> 4;
2450 q->tins[2].tin_quantum_band = quantum >> 1;
2451 q->tins[3].tin_quantum_band = quantum >> 2;
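/* Editor's note (inferred from the values above, added for exposition):
 * the threshold rates are the full base rate for tin 0, 1/16 of it for
 * tin 1, 1/2 for tin 2 and 1/4 for tin 3; the priority weights are 1024,
 * 64, 4096 and 16384 respectively.  Read against the four-class list
 * above, tin 3 lines up with the latency-sensitive class and tin 1 with
 * background traffic.
 */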
2453 return 0;
2454 }
2456 static int cake_config_diffserv3(struct Qdisc *sch)
2457 {
2458 /* Simplified Diffserv structure with 3 tins.
2459 * Low Priority (CS1)
2460 * Best Effort
2461  * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
2462  */
2463 struct cake_sched_data *q = qdisc_priv(sch);
2464 u32 mtu = psched_mtu(qdisc_dev(sch));
2465 u64 rate = q->rate_bps;
2466 u32 quantum = 1024;
2468 q->tin_cnt = 3;
2470 /* codepoint to class mapping */
2471 q->tin_index = diffserv3;
2472 q->tin_order = bulk_order;
2474 /* class characteristics */
2475 cake_set_rate(&q->tins[0], rate, mtu,
2476 us_to_ns(q->target), us_to_ns(q->interval));
2477 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2478 us_to_ns(q->target), us_to_ns(q->interval));
2479 cake_set_rate(&q->tins[2], rate >> 2, mtu,
2480 us_to_ns(q->target), us_to_ns(q->interval));
2482 /* priority weights */
2483 q->tins[0].tin_quantum_prio = quantum;
2484 q->tins[1].tin_quantum_prio = quantum >> 4;
2485 q->tins[2].tin_quantum_prio = quantum << 4;
2487 /* bandwidth-sharing weights */
2488 q->tins[0].tin_quantum_band = quantum;
2489 q->tins[1].tin_quantum_band = quantum >> 4;
2490 q->tins[2].tin_quantum_band = quantum >> 2;
2492 return 0;
2493 }
2495 static void cake_reconfigure(struct Qdisc *sch)
2496 {
2497 struct cake_sched_data *q = qdisc_priv(sch);
2498 int c, ft;
2500 switch (q->tin_mode) {
2501 case CAKE_DIFFSERV_BESTEFFORT:
2502 ft = cake_config_besteffort(sch);
2503 break;
2505 case CAKE_DIFFSERV_PRECEDENCE:
2506 ft = cake_config_precedence(sch);
2507 break;
2509 case CAKE_DIFFSERV_DIFFSERV8:
2510 ft = cake_config_diffserv8(sch);
2511 break;
2513 case CAKE_DIFFSERV_DIFFSERV4:
2514 ft = cake_config_diffserv4(sch);
2515 break;
2517 case CAKE_DIFFSERV_DIFFSERV3:
2518 default:
2519 ft = cake_config_diffserv3(sch);
2520 break;
2521 }
2523 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
2524 cake_clear_tin(sch, c);
2525 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
2526 }
2528 q->rate_ns = q->tins[ft].tin_rate_ns;
2529 q->rate_shft = q->tins[ft].tin_rate_shft;
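/* Editor's note (worked example, assuming rate_bps is in bytes per second):
 * the automatic buffer limit below is 4 * rate * interval, i.e. four times
 * the bandwidth-delay product, with a 4 MiB floor.  At 12500000 bytes/s and
 * a 100 ms interval that is 12500000 * 100000 / 250000 = 5000000 bytes,
 * which is then clamped against sch->limit * MTU further down.
 */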
2531 if (q->buffer_config_limit) {
2532 q->buffer_limit = q->buffer_config_limit;
2533 } else if (q->rate_bps) {
2534 u64 t = q->rate_bps * q->interval;
2536 do_div(t, USEC_PER_SEC / 4);
2537 q->buffer_limit = max_t(u32, t, 4U << 20);
2538 } else {
2539 q->buffer_limit = ~0;
2540 }
2542 sch->flags &= ~TCQ_F_CAN_BYPASS;
2544 q->buffer_limit = min(q->buffer_limit,
2545 max(sch->limit * psched_mtu(qdisc_dev(sch)),
2546 q->buffer_config_limit));
2547 }
2549 static int cake_change(struct Qdisc *sch, struct nlattr *opt,
2550 struct netlink_ext_ack *extack)
2551 {
2552 struct cake_sched_data *q = qdisc_priv(sch);
2553 struct nlattr *tb[TCA_CAKE_MAX + 1];
2554 int err;
2556 if (!opt)
2557 return -EINVAL;
2559 err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
2560 extack);
2561 if (err < 0)
2562 return err;
2564 if (tb[TCA_CAKE_NAT]) {
2565 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
2566 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
2567 q->flow_mode |= CAKE_FLOW_NAT_FLAG *
2568 !!nla_get_u32(tb[TCA_CAKE_NAT]);
2569 #else
2570 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
2571 "No conntrack support in kernel");
2572 return -EOPNOTSUPP;
2573 #endif
2574 }
2576 if (tb[TCA_CAKE_BASE_RATE64])
2577 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
2579 if (tb[TCA_CAKE_DIFFSERV_MODE])
2580 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
2582 if (tb[TCA_CAKE_WASH]) {
2583 if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
2584 q->rate_flags |= CAKE_FLAG_WASH;
2585 else
2586 q->rate_flags &= ~CAKE_FLAG_WASH;
2587 }
2589 if (tb[TCA_CAKE_FLOW_MODE])
2590 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
2591 (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
2592 CAKE_FLOW_MASK));
2594 if (tb[TCA_CAKE_ATM])
2595 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
2597 if (tb[TCA_CAKE_OVERHEAD]) {
2598 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
2599 q->rate_flags |= CAKE_FLAG_OVERHEAD;
2601 q->max_netlen = 0;
2602 q->max_adjlen = 0;
2603 q->min_netlen = ~0;
2604 q->min_adjlen = ~0;
2605 }
2607 if (tb[TCA_CAKE_RAW]) {
2608 q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
2610 q->max_netlen = 0;
2611 q->max_adjlen = 0;
2612 q->min_netlen = ~0;
2613 q->min_adjlen = ~0;
2614 }
2616 if (tb[TCA_CAKE_MPU])
2617 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
2619 if (tb[TCA_CAKE_RTT]) {
2620 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
2622 if (!q->interval)
2623 q->interval = 1;
2624 }
2626 if (tb[TCA_CAKE_TARGET]) {
2627 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
2629 if (!q->target)
2630 q->target = 1;
2631 }
2633 if (tb[TCA_CAKE_AUTORATE]) {
2634 if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
2635 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
2636 else
2637 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
2638 }
2640 if (tb[TCA_CAKE_INGRESS]) {
2641 if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
2642 q->rate_flags |= CAKE_FLAG_INGRESS;
2643 else
2644 q->rate_flags &= ~CAKE_FLAG_INGRESS;
2645 }
2647 if (tb[TCA_CAKE_ACK_FILTER])
2648 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
2650 if (tb[TCA_CAKE_MEMORY])
2651 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
2653 if (tb[TCA_CAKE_SPLIT_GSO]) {
2654 if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
2655 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2656 else
2657 q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
2658 }
2660 if (tb[TCA_CAKE_FWMARK]) {
2661 q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
2662 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
2663 }
2665 if (q->tins) {
2666 sch_tree_lock(sch);
2667 cake_reconfigure(sch);
2668 sch_tree_unlock(sch);
2669 }
2671 return 0;
2672 }
2674 static void cake_destroy(struct Qdisc *sch)
2675 {
2676 struct cake_sched_data *q = qdisc_priv(sch);
2678 qdisc_watchdog_cancel(&q->watchdog);
2679 tcf_block_put(q->block);
2680 kvfree(q->tins);
2681 }
2683 static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2684 struct netlink_ext_ack *extack)
2685 {
2686 struct cake_sched_data *q = qdisc_priv(sch);
2687 int i, j, err;
2689 sch->limit = 10240;
2690 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
2691 q->flow_mode = CAKE_FLOW_TRIPLE;
2693 q->rate_bps = 0; /* unlimited by default */
2695 q->interval = 100000; /* 100ms default */
2696 q->target = 5000; /* 5ms: codel RFC argues
2697  * for 5 to 10% of interval
2698  */
2699 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2700 q->cur_tin = 0;
2701 q->cur_flow = 0;
2703 qdisc_watchdog_init(&q->watchdog, sch);
2705 if (opt) {
2706 err = cake_change(sch, opt, extack);
2708 if (err)
2709 return err;
2710 }
2712 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2713 if (err)
2714 return err;
2716 quantum_div[0] = ~0;
2717 for (i = 1; i <= CAKE_QUEUES; i++)
2718 quantum_div[i] = 65535 / i;
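/* Editor's note: quantum_div[] is a reciprocal table, quantum_div[i] being
 * roughly 2^16 / i; cake_dequeue() multiplies by it and shifts right by 16
 * to divide the flow quantum by the per-host bulk flow count without a
 * per-packet division.  Index 0 is never used for a real host load and is
 * set to ~0 as a guard value.
 */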
2720 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
2721 GFP_KERNEL);
2722 if (!q->tins)
2723 goto nomem;
2725 for (i = 0; i < CAKE_MAX_TINS; i++) {
2726 struct cake_tin_data *b = q->tins + i;
2728 INIT_LIST_HEAD(&b->new_flows);
2729 INIT_LIST_HEAD(&b->old_flows);
2730 INIT_LIST_HEAD(&b->decaying_flows);
2731 b->sparse_flow_count = 0;
2732 b->bulk_flow_count = 0;
2733 b->decaying_flow_count = 0;
2735 for (j = 0; j < CAKE_QUEUES; j++) {
2736 struct cake_flow *flow = b->flows + j;
2737 u32 k = j * CAKE_MAX_TINS + i;
2739 INIT_LIST_HEAD(&flow->flowchain);
2740 cobalt_vars_init(&flow->cvars);
2742 q->overflow_heap[k].t = i;
2743 q->overflow_heap[k].b = j;
2744 b->overflow_idx[j] = k;
2745 }
2746 }
2748 cake_reconfigure(sch);
2749 q->avg_peak_bandwidth = q->rate_bps;
2750 q->min_netlen = ~0;
2751 q->min_adjlen = ~0;
2752 return 0;
2754 nomem:
2755 cake_destroy(sch);
2756 return -ENOMEM;
2757 }
2759 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2760 {
2761 struct cake_sched_data *q = qdisc_priv(sch);
2762 struct nlattr *opts;
2764 opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
2765 if (!opts)
2766 goto nla_put_failure;
2768 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
2769 TCA_CAKE_PAD))
2770 goto nla_put_failure;
2772 if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
2773 q->flow_mode & CAKE_FLOW_MASK))
2774 goto nla_put_failure;
2776 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
2777 goto nla_put_failure;
2779 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
2780 goto nla_put_failure;
2782 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
2783 goto nla_put_failure;
2785 if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
2786 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
2787 goto nla_put_failure;
2789 if (nla_put_u32(skb, TCA_CAKE_INGRESS,
2790 !!(q->rate_flags & CAKE_FLAG_INGRESS)))
2791 goto nla_put_failure;
2793 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
2794 goto nla_put_failure;
2796 if (nla_put_u32(skb, TCA_CAKE_NAT,
2797 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
2798 goto nla_put_failure;
2800 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
2801 goto nla_put_failure;
2803 if (nla_put_u32(skb, TCA_CAKE_WASH,
2804 !!(q->rate_flags & CAKE_FLAG_WASH)))
2805 goto nla_put_failure;
2807 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
2808 goto nla_put_failure;
2810 if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
2811 if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
2812 goto nla_put_failure;
2814 if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
2815 goto nla_put_failure;
2817 if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
2818 goto nla_put_failure;
2820 if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
2821 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
2822 goto nla_put_failure;
2824 if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
2825 goto nla_put_failure;
2827 return nla_nest_end(skb, opts);
2829 nla_put_failure:
2830 return -1;
2831 }
2833 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
2834 {
2835 struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
2836 struct cake_sched_data *q = qdisc_priv(sch);
2837 struct nlattr *tstats, *ts;
2838 int i;
2840 if (!stats)
2841 return -1;
2843 #define PUT_STAT_U32(attr, data) do { \
2844 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2845 goto nla_put_failure; \
2846 } while (0)
2847 #define PUT_STAT_U64(attr, data) do { \
2848 if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
2849 data, TCA_CAKE_STATS_PAD)) \
2850 goto nla_put_failure; \
2851 } while (0)
2853 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
2854 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
2855 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
2856 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
2857 PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
2858 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
2859 PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
2860 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
2862 #undef PUT_STAT_U32
2863 #undef PUT_STAT_U64
2865 tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
2866 if (!tstats)
2867 goto nla_put_failure;
2869 #define PUT_TSTAT_U32(attr, data) do { \
2870 if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
2871 goto nla_put_failure; \
2872 } while (0)
2873 #define PUT_TSTAT_U64(attr, data) do { \
2874 if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
2875 data, TCA_CAKE_TIN_STATS_PAD)) \
2876 goto nla_put_failure; \
2877 } while (0)
2879 for (i = 0; i < q->tin_cnt; i++) {
2880 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
2882 ts = nla_nest_start_noflag(d->skb, i + 1);
2883 if (!ts)
2884 goto nla_put_failure;
2886 PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
2887 PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
2888 PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
2890 PUT_TSTAT_U32(TARGET_US,
2891 ktime_to_us(ns_to_ktime(b->cparams.target)));
2892 PUT_TSTAT_U32(INTERVAL_US,
2893 ktime_to_us(ns_to_ktime(b->cparams.interval)));
2895 PUT_TSTAT_U32(SENT_PACKETS, b->packets);
2896 PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
2897 PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
2898 PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
2900 PUT_TSTAT_U32(PEAK_DELAY_US,
2901 ktime_to_us(ns_to_ktime(b->peak_delay)));
2902 PUT_TSTAT_U32(AVG_DELAY_US,
2903 ktime_to_us(ns_to_ktime(b->avge_delay)));
2904 PUT_TSTAT_U32(BASE_DELAY_US,
2905 ktime_to_us(ns_to_ktime(b->base_delay)));
2907 PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
2908 PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
2909 PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
2911 PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
2912 b->decaying_flow_count);
2913 PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
2914 PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
2915 PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
2917 PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
2918 nla_nest_end(d->skb, ts);
2919 }
2921 #undef PUT_TSTAT_U32
2922 #undef PUT_TSTAT_U64
2924 nla_nest_end(d->skb, tstats);
2925 return nla_nest_end(d->skb, stats);
2927 nla_put_failure:
2928 nla_nest_cancel(d->skb, stats);
2929 return -1;
2930 }
2932 static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
2933 {
2934 return NULL;
2935 }
2937 static unsigned long cake_find(struct Qdisc *sch, u32 classid)
2938 {
2939 return 0;
2940 }
2942 static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
2943 u32 classid)
2944 {
2945 return 0;
2946 }
2948 static void cake_unbind(struct Qdisc *q, unsigned long cl)
2949 {
2950 }
2952 static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
2953 struct netlink_ext_ack *extack)
2954 {
2955 struct cake_sched_data *q = qdisc_priv(sch);
2957 if (cl)
2958 return NULL;
2959 return q->block;
2960 }
2962 static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
2963 struct sk_buff *skb, struct tcmsg *tcm)
2964 {
2965 tcm->tcm_handle |= TC_H_MIN(cl);
2966 return 0;
2967 }
2969 static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
2970 struct gnet_dump *d)
2971 {
2972 struct cake_sched_data *q = qdisc_priv(sch);
2973 const struct cake_flow *flow = NULL;
2974 struct gnet_stats_queue qs = { 0 };
2975 struct nlattr *stats;
2976 u32 idx = cl - 1;
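/* Editor's note: class ids handed out by cake_walk() below are 1-based
 * (tin * CAKE_QUEUES + queue + 1), so idx recovers a flat flow index;
 * idx / CAKE_QUEUES selects the tin and idx % CAKE_QUEUES the queue.
 */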
2978 if (idx < CAKE_QUEUES * q->tin_cnt) {
2979 const struct cake_tin_data *b = \
2980 &q->tins[q->tin_order[idx / CAKE_QUEUES]];
2981 const struct sk_buff *skb;
2983 flow = &b->flows[idx % CAKE_QUEUES];
2985 if (flow->head) {
2986 sch_tree_lock(sch);
2987 skb = flow->head;
2988 while (skb) {
2989 qs.qlen++;
2990 skb = skb->next;
2991 }
2992 sch_tree_unlock(sch);
2993 }
2994 qs.backlog = b->backlogs[idx % CAKE_QUEUES];
2995 qs.drops = flow->dropped;
2996 }
2997 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
2998 return -1;
2999 if (flow) {
3000 ktime_t now = ktime_get();
3002 stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
3003 if (!stats)
3004 return -1;
3006 #define PUT_STAT_U32(attr, data) do { \
3007 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
3008 goto nla_put_failure; \
3009 } while (0)
3010 #define PUT_STAT_S32(attr, data) do { \
3011 if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
3012 goto nla_put_failure; \
3013 } while (0)
3015 PUT_STAT_S32(DEFICIT, flow->deficit);
3016 PUT_STAT_U32(DROPPING, flow->cvars.dropping);
3017 PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
3018 PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
3019 if (flow->cvars.p_drop) {
3020 PUT_STAT_S32(BLUE_TIMER_US,
3021 ktime_to_us(
3022 ktime_sub(now,
3023 flow->cvars.blue_timer)));
3024 }
3025 if (flow->cvars.dropping) {
3026 PUT_STAT_S32(DROP_NEXT_US,
3027 ktime_to_us(
3028 ktime_sub(now,
3029 flow->cvars.drop_next)));
3030 }
3032 if (nla_nest_end(d->skb, stats) < 0)
3033 return -1;
3034 }
3036 return 0;
3038 nla_put_failure:
3039 nla_nest_cancel(d->skb, stats);
3040 return -1;
3041 }
3043 static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
3044 {
3045 struct cake_sched_data *q = qdisc_priv(sch);
3046 unsigned int i, j;
3048 if (arg->stop)
3049 return;
3051 for (i = 0; i < q->tin_cnt; i++) {
3052 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
3054 for (j = 0; j < CAKE_QUEUES; j++) {
3055 if (list_empty(&b->flows[j].flowchain) ||
3056 arg->count < arg->skip) {
3057 arg->count++;
3058 continue;
3059 }
3060 if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
3061 arg->stop = 1;
3062 break;
3063 }
3064 arg->count++;
3065 }
3066 }
3067 }
3069 static const struct Qdisc_class_ops cake_class_ops = {
3070 .leaf = cake_leaf,
3071 .find = cake_find,
3072 .tcf_block = cake_tcf_block,
3073 .bind_tcf = cake_bind,
3074 .unbind_tcf = cake_unbind,
3075 .dump = cake_dump_class,
3076 .dump_stats = cake_dump_class_stats,
3077 .walk = cake_walk,
3078 };
3080 static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
3081 .cl_ops = &cake_class_ops,
3082 .id = "cake",
3083 .priv_size = sizeof(struct cake_sched_data),
3084 .enqueue = cake_enqueue,
3085 .dequeue = cake_dequeue,
3086 .peek = qdisc_peek_dequeued,
3087 .init = cake_init,
3088 .reset = cake_reset,
3089 .destroy = cake_destroy,
3090 .change = cake_change,
3091 .dump = cake_dump,
3092 .dump_stats = cake_dump_stats,
3093 .owner = THIS_MODULE,
3094 };
3096 static int __init cake_module_init(void)
3097 {
3098 return register_qdisc(&cake_qdisc_ops);
3099 }
3101 static void __exit cake_module_exit(void)
3102 {
3103 unregister_qdisc(&cake_qdisc_ops);
3104 }
3106 module_init(cake_module_init)
3107 module_exit(cake_module_exit)
3108 MODULE_AUTHOR("Jonathan Morton");
3109 MODULE_LICENSE("Dual BSD/GPL");
3110 MODULE_DESCRIPTION("The CAKE shaper.");