// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
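
/* Drop probability is kept as a 64-bit fixed-point fraction: MAX_PROB
 * represents a probability of 1.0, so e.g. MAX_PROB / 10 stands for 10%.
 * PIE_SCALE is the fixed-point shift used for the drain-rate and
 * queue-delay arithmetic below.
 */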

/* parameters used */
struct pie_params {
	psched_time_t target;	/* user specified target delay in pschedtime */
	u32 tupdate;		/* timer frequency (in jiffies) */
	u32 limit;		/* number of packets that can be enqueued */
	u32 alpha;		/* alpha and beta are between 0 and 32 */
	u32 beta;		/* and are used for shift relative to 1 */
	bool ecn;		/* true if ecn is enabled */
	bool bytemode;		/* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
	u64 prob;		/* probability but scaled by u64 limit. */
	psched_time_t burst_time;	/* remaining burst allowance */
	psched_time_t qdelay;		/* current queue delay */
	psched_time_t qdelay_old;	/* queue delay at the previous Tupdate */
	u64 dq_count;		/* measured in bytes */
	psched_time_t dq_tstamp;	/* start of drain rate measurement */
	u64 accu_prob;		/* accumulated drop probability */
	u32 avg_dq_rate;	/* bytes per pschedtime tick, scaled */
	u32 qlen_old;		/* in bytes */
	u8 accu_prob_overflows;	/* overflows of accu_prob */
};

/* statistics gathering */
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to pie_action */
	u32 overlimit;		/* dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size */
	u32 ecn_mark;		/* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC);	/* 15 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);	/* 15 ms */
	params->ecn = false;
	params->bytemode = false;
}

static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->accu_prob = 0;
	vars->avg_dq_rate = 0;
	/* default of 150 ms in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
	vars->accu_prob_overflows = 0;
}

static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u64 rnd;
	u64 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = q->vars.prob;

	if (local_prob == 0) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
	}

	if (local_prob > MAX_PROB - q->vars.accu_prob)
		q->vars.accu_prob_overflows++;

	q->vars.accu_prob += local_prob;

	if (q->vars.accu_prob_overflows == 0 &&
	    q->vars.accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (q->vars.accu_prob_overflows == 8 &&
	    q->vars.accu_prob >= MAX_PROB / 2)
		return true;
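
	/* The accu_prob checks above implement the optional derandomization
	 * from RFC 8033: never drop while the accumulated probability is
	 * still below 0.85, and force a drop once it reaches 8.5 (eight
	 * overflows of MAX_PROB plus MAX_PROB / 2).
	 */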

	prandom_bytes(&rnd, 8);
	if (rnd < local_prob) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
		return true;
	}

	return false;
}

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	q->vars.accu_prob_overflows = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};
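
/* These attributes map to the iproute2 options, e.g.:
 *
 *   tc qdisc add dev eth0 root pie limit 1000 target 15ms \
 *	tupdate 15ms alpha 2 beta 20 ecn bytemode
 *
 * Values shown are illustrative; see tc-pie(8) for details.
 */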

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If the current queue is at least QUEUE_THRESHOLD bytes (about 10
	 * MTU-sized packets) and dq_count is unset, we have enough packets
	 * to calculate the drain rate. Save the current time as dq_tstamp
	 * and start a measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If the queue
	 * length has receded to a small value, viz. < QUEUE_THRESHOLD bytes,
	 * reset dq_count to -1 as we don't have enough packets to calculate
	 * the drain rate anymore. The following if block is entered only
	 * when we have a substantial queue built up (QUEUE_THRESHOLD bytes
	 * or more) and we calculate the drain rate for the threshold here.
	 * dq_count is in bytes, the time difference in psched_time, hence
	 * the rate is in bytes/psched_time.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);
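
			/* The update above is an EWMA with weight 1/8:
			 * avg_dq_rate = 7/8 * avg_dq_rate + 1/8 * count,
			 * so a single odd measurement shifts the average
			 * by only an eighth of the difference.
			 */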

			/* If the queue has receded below the threshold, we
			 * hold on to the last drain rate calculated, else we
			 * reset dq_count to 0 to re-enter the if block when
			 * the next packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD) {
				q->vars.dq_count = DQCOUNT_INVALID;
			} else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}

static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;
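
	/* This is Little's law: queue delay = backlog / drain rate. The
	 * left shift by PIE_SCALE cancels the scaling already applied to
	 * avg_dq_rate, leaving qdelay in pschedtime ticks.
	 */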

	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * less than dequeue_rate, so we do not update probability in this
	 * round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
	alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
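
	/* For example, a user-space alpha of 2 corresponds to 2 / 16 = 0.125
	 * in the RFC's units; multiplying by MAX_PROB / PSCHED_TICKS_PER_SEC
	 * converts it from per-second units into the fixed-point
	 * probability domain.
	 */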

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (q->vars.prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (q->vars.prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (u64)(qdelay - q->params.target);
	delta += beta * (u64)(qdelay - qdelay_old);

	oldprob = q->vars.prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;
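
	/* MAX_PROB / (100 / 2) is 2% of MAX_PROB, so once prob is past 10%
	 * each Tupdate can raise it by at most two percentage points.
	 */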

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays
	 * (>= 250 ms). 250 ms is derived through experiments and provides
	 * error protection.
	 */
	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */
	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		q->vars.prob -= q->vars.prob / 64u;
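
	/* Under sustained zero delay, prob therefore decays geometrically:
	 * after n Tupdate periods it is (63/64)^n of its starting value,
	 * e.g. roughly 53% after 40 periods (0.6 s at the default 15 ms
	 * tupdate).
	 */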

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. We have at least one estimate for the avg_dq_rate, i.e.
	 *    it is a non-zero value
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    q->vars.prob == 0 &&
	    q->vars.avg_dq_rate > 0)
		pie_vars_init(&q->vars);
}

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				  NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate	= q->vars.avg_dq_rate *
				  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
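
/* These xstats are what "tc -s qdisc show" reports for a pie qdisc:
 * prob and delay reflect the current controller state, while the
 * counters mirror struct pie_stats.
 */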

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");