// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *		       - a better sing. AvgQ mode with Grio(WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
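
/* Each packet is steered into one of up to MAX_DPs virtual queues (DPs).
 * The DP index comes from the low bits of skb->tc_index (see
 * tc_index_to_dp() below), so a classifier or action earlier in the
 * datapath is expected to have set tc_index; packets mapping to a
 * missing DP fall back to the default DP in gred_enqueue().
 */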
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
	struct tc_gred_qopt_offload *opt;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
/* Returns 1 if at least two virtual queues share the same priority,
 * which is the condition for running in WRED mode.
 */
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too often. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
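
/* In WRED mode all virtual queues share one set of RED averaging state
 * (table->wred_set): it is loaded into the VQ before red_calc_qavg()
 * runs and stored back afterwards, so the whole qdisc is averaged as a
 * single queue while per-VQ thresholds and limits still apply.
 */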
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}
static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless global are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);

congestion_drop:
	qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_CONGESTED);
	return NET_XMIT_CN;
}
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}
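
/* The offload request buffer (table->opt) is allocated once in
 * gred_init() when the device provides ndo_setup_tc, and is reused by
 * gred_offload() below for every TC_GRED_REPLACE/TC_GRED_DESTROY call.
 */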
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload *opt = table->opt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = sch->handle;
	opt->parent = sch->parent;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt->set.grio_on = gred_rio_mode(table);
		opt->set.wred_on = gred_wred_mode(table);
		opt->set.dp_cnt = table->DPs;
		opt->set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt->set.tab[i].present = true;
			opt->set.tab[i].limit = q->limit;
			opt->set.tab[i].prio = q->prio;
			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt->set.tab[i].is_ecn = gred_use_ecn(q);
			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt->set.tab[i].probability = q->parms.max_P;
			opt->set.tab[i].backlog = &q->backlog;
		}

		opt->set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if driver returns failure adjust the stats - in case offload
	 * ended but driver still wants to adjust the values.
	 */
	sch_tree_lock(sch);
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);
	sch_tree_unlock(sch);

	kfree(hw_stats);
	return ret;
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
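
/* Configuration is split in two (see gred_change()): a TCA_GRED_DPS
 * attribute carrying struct tc_gred_sopt (re)defines the table itself
 * (DP count, default DP, grio, flags), optionally together with
 * TCA_GRED_LIMIT, and is handled by gred_change_table_def(); a request
 * carrying TCA_GRED_PARMS + TCA_GRED_STAB (plus optional TCA_GRED_MAX_P
 * and TCA_GRED_VQ_LIST) configures a single virtual queue via
 * gred_change_vq().
 */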
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = nla_get_u32_default(tb[TCA_GRED_MAX_P], 0);

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
	}

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
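
/* gred_dump() reports the virtual queues twice: once as the legacy
 * TCA_GRED_PARMS array of MAX_DPs struct tc_gred_qopt entries (DP >=
 * MAX_DPs marks an unused slot), and once as the nested
 * TCA_GRED_VQ_LIST with per-VQ attributes and statistics.
 */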
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++)
		gred_destroy_vq(table->tab[i]);

	gred_offload(sch, TC_GRED_DESTROY);
	kfree(table->opt);
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("gred");

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic Random Early Detection qdisc");