1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"
16 static bool nfp_abm_qdisc_is_red(struct nfp_qdisc
*qdisc
)
18 return qdisc
->type
== NFP_QDISC_RED
|| qdisc
->type
== NFP_QDISC_GRED
;
21 static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc
*qdisc
, unsigned int id
)
23 return qdisc
->children
[id
] &&
24 qdisc
->children
[id
] != NFP_QDISC_UNTRACKED
;
27 static void *nfp_abm_qdisc_tree_deref_slot(void __rcu
**slot
)
29 return rtnl_dereference(*slot
);
33 nfp_abm_stats_propagate(struct nfp_alink_stats
*parent
,
34 struct nfp_alink_stats
*child
)
36 parent
->tx_pkts
+= child
->tx_pkts
;
37 parent
->tx_bytes
+= child
->tx_bytes
;
38 parent
->backlog_pkts
+= child
->backlog_pkts
;
39 parent
->backlog_bytes
+= child
->backlog_bytes
;
40 parent
->overlimits
+= child
->overlimits
;
41 parent
->drops
+= child
->drops
;
45 nfp_abm_stats_update_red(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
,
48 struct nfp_cpp
*cpp
= alink
->abm
->app
->cpp
;
52 if (!qdisc
->offloaded
)
55 for (i
= 0; i
< qdisc
->red
.num_bands
; i
++) {
56 err
= nfp_abm_ctrl_read_q_stats(alink
, i
, queue
,
57 &qdisc
->red
.band
[i
].stats
);
59 nfp_err(cpp
, "RED stats (%d, %d) read failed with error %d\n",
62 err
= nfp_abm_ctrl_read_q_xstats(alink
, i
, queue
,
63 &qdisc
->red
.band
[i
].xstats
);
65 nfp_err(cpp
, "RED xstats (%d, %d) read failed with error %d\n",
71 nfp_abm_stats_update_mq(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
)
75 if (qdisc
->type
!= NFP_QDISC_MQ
)
78 for (i
= 0; i
< alink
->total_queues
; i
++)
79 if (nfp_abm_qdisc_child_valid(qdisc
, i
))
80 nfp_abm_stats_update_red(alink
, qdisc
->children
[i
], i
);
83 static void __nfp_abm_stats_update(struct nfp_abm_link
*alink
, u64 time_now
)
85 alink
->last_stats_update
= time_now
;
86 if (alink
->root_qdisc
)
87 nfp_abm_stats_update_mq(alink
, alink
->root_qdisc
);
90 static void nfp_abm_stats_update(struct nfp_abm_link
*alink
)
94 /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
95 * of all their leafs, so we would read the same stat multiple times
99 if (now
- alink
->last_stats_update
< NFP_ABM_STATS_REFRESH_IVAL
)
102 __nfp_abm_stats_update(alink
, now
);
106 nfp_abm_qdisc_unlink_children(struct nfp_qdisc
*qdisc
,
107 unsigned int start
, unsigned int end
)
111 for (i
= start
; i
< end
; i
++)
112 if (nfp_abm_qdisc_child_valid(qdisc
, i
)) {
113 qdisc
->children
[i
]->use_cnt
--;
114 qdisc
->children
[i
] = NULL
;
119 nfp_abm_qdisc_offload_stop(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
)
123 /* Don't complain when qdisc is getting unlinked */
125 nfp_warn(alink
->abm
->app
->cpp
, "Offload of '%08x' stopped\n",
128 if (!nfp_abm_qdisc_is_red(qdisc
))
131 for (i
= 0; i
< qdisc
->red
.num_bands
; i
++) {
132 qdisc
->red
.band
[i
].stats
.backlog_pkts
= 0;
133 qdisc
->red
.band
[i
].stats
.backlog_bytes
= 0;
138 __nfp_abm_stats_init(struct nfp_abm_link
*alink
, unsigned int band
,
139 unsigned int queue
, struct nfp_alink_stats
*prev_stats
,
140 struct nfp_alink_xstats
*prev_xstats
)
142 u64 backlog_pkts
, backlog_bytes
;
145 /* Don't touch the backlog, backlog can only be reset after it has
146 * been reported back to the tc qdisc stats.
148 backlog_pkts
= prev_stats
->backlog_pkts
;
149 backlog_bytes
= prev_stats
->backlog_bytes
;
151 err
= nfp_abm_ctrl_read_q_stats(alink
, band
, queue
, prev_stats
);
153 nfp_err(alink
->abm
->app
->cpp
,
154 "RED stats init (%d, %d) failed with error %d\n",
159 err
= nfp_abm_ctrl_read_q_xstats(alink
, band
, queue
, prev_xstats
);
161 nfp_err(alink
->abm
->app
->cpp
,
162 "RED xstats init (%d, %d) failed with error %d\n",
167 prev_stats
->backlog_pkts
= backlog_pkts
;
168 prev_stats
->backlog_bytes
= backlog_bytes
;
173 nfp_abm_stats_init(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
,
179 for (i
= 0; i
< qdisc
->red
.num_bands
; i
++) {
180 err
= __nfp_abm_stats_init(alink
, i
, queue
,
181 &qdisc
->red
.band
[i
].prev_stats
,
182 &qdisc
->red
.band
[i
].prev_xstats
);
191 nfp_abm_offload_compile_red(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
,
194 bool good_red
, good_gred
;
197 good_red
= qdisc
->type
== NFP_QDISC_RED
&&
199 qdisc
->use_cnt
== 1 &&
202 good_gred
= qdisc
->type
== NFP_QDISC_GRED
&&
205 qdisc
->offload_mark
= good_red
|| good_gred
;
207 /* If we are starting offload init prev_stats */
208 if (qdisc
->offload_mark
&& !qdisc
->offloaded
)
209 if (nfp_abm_stats_init(alink
, qdisc
, queue
))
210 qdisc
->offload_mark
= false;
212 if (!qdisc
->offload_mark
)
215 for (i
= 0; i
< alink
->abm
->num_bands
; i
++) {
216 enum nfp_abm_q_action act
;
218 nfp_abm_ctrl_set_q_lvl(alink
, i
, queue
,
219 qdisc
->red
.band
[i
].threshold
);
220 act
= qdisc
->red
.band
[i
].ecn
?
221 NFP_ABM_ACT_MARK_DROP
: NFP_ABM_ACT_DROP
;
222 nfp_abm_ctrl_set_q_act(alink
, i
, queue
, act
);
227 nfp_abm_offload_compile_mq(struct nfp_abm_link
*alink
, struct nfp_qdisc
*qdisc
)
231 qdisc
->offload_mark
= qdisc
->type
== NFP_QDISC_MQ
;
232 if (!qdisc
->offload_mark
)
235 for (i
= 0; i
< alink
->total_queues
; i
++) {
236 struct nfp_qdisc
*child
= qdisc
->children
[i
];
238 if (!nfp_abm_qdisc_child_valid(qdisc
, i
))
241 nfp_abm_offload_compile_red(alink
, child
, i
);
245 void nfp_abm_qdisc_offload_update(struct nfp_abm_link
*alink
)
247 struct nfp_abm
*abm
= alink
->abm
;
248 struct radix_tree_iter iter
;
249 struct nfp_qdisc
*qdisc
;
253 /* Mark all thresholds as unconfigured */
254 for (i
= 0; i
< abm
->num_bands
; i
++)
255 __bitmap_set(abm
->threshold_undef
,
256 i
* NFP_NET_MAX_RX_RINGS
+ alink
->queue_base
,
257 alink
->total_queues
);
259 /* Clear offload marks */
260 radix_tree_for_each_slot(slot
, &alink
->qdiscs
, &iter
, 0) {
261 qdisc
= nfp_abm_qdisc_tree_deref_slot(slot
);
262 qdisc
->offload_mark
= false;
265 if (alink
->root_qdisc
)
266 nfp_abm_offload_compile_mq(alink
, alink
->root_qdisc
);
268 /* Refresh offload status */
269 radix_tree_for_each_slot(slot
, &alink
->qdiscs
, &iter
, 0) {
270 qdisc
= nfp_abm_qdisc_tree_deref_slot(slot
);
271 if (!qdisc
->offload_mark
&& qdisc
->offloaded
)
272 nfp_abm_qdisc_offload_stop(alink
, qdisc
);
273 qdisc
->offloaded
= qdisc
->offload_mark
;
276 /* Reset the unconfigured thresholds */
277 for (i
= 0; i
< abm
->num_thresholds
; i
++)
278 if (test_bit(i
, abm
->threshold_undef
))
279 __nfp_abm_ctrl_set_q_lvl(abm
, i
, NFP_ABM_LVL_INFINITY
);
281 __nfp_abm_stats_update(alink
, ktime_get());
285 nfp_abm_qdisc_clear_mq(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
286 struct nfp_qdisc
*qdisc
)
288 struct radix_tree_iter iter
;
289 unsigned int mq_refs
= 0;
294 /* MQ doesn't notify well on destruction, we need special handling of
297 if (qdisc
->type
== NFP_QDISC_MQ
&&
298 qdisc
== alink
->root_qdisc
&&
299 netdev
->reg_state
== NETREG_UNREGISTERING
)
302 /* Count refs held by MQ instances and clear pointers */
303 radix_tree_for_each_slot(slot
, &alink
->qdiscs
, &iter
, 0) {
304 struct nfp_qdisc
*mq
= nfp_abm_qdisc_tree_deref_slot(slot
);
307 if (mq
->type
!= NFP_QDISC_MQ
|| mq
->netdev
!= netdev
)
309 for (i
= 0; i
< mq
->num_children
; i
++)
310 if (mq
->children
[i
] == qdisc
) {
311 mq
->children
[i
] = NULL
;
316 WARN(qdisc
->use_cnt
!= mq_refs
, "non-zero qdisc use count: %d (- %d)\n",
317 qdisc
->use_cnt
, mq_refs
);
321 nfp_abm_qdisc_free(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
322 struct nfp_qdisc
*qdisc
)
324 struct nfp_port
*port
= nfp_port_from_netdev(netdev
);
328 nfp_abm_qdisc_clear_mq(netdev
, alink
, qdisc
);
329 WARN_ON(radix_tree_delete(&alink
->qdiscs
,
330 TC_H_MAJ(qdisc
->handle
)) != qdisc
);
332 kfree(qdisc
->children
);
335 port
->tc_offload_cnt
--;
338 static struct nfp_qdisc
*
339 nfp_abm_qdisc_alloc(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
340 enum nfp_qdisc_type type
, u32 parent_handle
, u32 handle
,
341 unsigned int children
)
343 struct nfp_port
*port
= nfp_port_from_netdev(netdev
);
344 struct nfp_qdisc
*qdisc
;
347 qdisc
= kzalloc(sizeof(*qdisc
), GFP_KERNEL
);
352 qdisc
->children
= kcalloc(children
, sizeof(void *), GFP_KERNEL
);
353 if (!qdisc
->children
)
357 qdisc
->netdev
= netdev
;
359 qdisc
->parent_handle
= parent_handle
;
360 qdisc
->handle
= handle
;
361 qdisc
->num_children
= children
;
363 err
= radix_tree_insert(&alink
->qdiscs
, TC_H_MAJ(qdisc
->handle
), qdisc
);
365 nfp_err(alink
->abm
->app
->cpp
,
366 "Qdisc insertion into radix tree failed: %d\n", err
);
367 goto err_free_child_tbl
;
370 port
->tc_offload_cnt
++;
374 kfree(qdisc
->children
);
380 static struct nfp_qdisc
*
381 nfp_abm_qdisc_find(struct nfp_abm_link
*alink
, u32 handle
)
383 return radix_tree_lookup(&alink
->qdiscs
, TC_H_MAJ(handle
));
387 nfp_abm_qdisc_replace(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
388 enum nfp_qdisc_type type
, u32 parent_handle
, u32 handle
,
389 unsigned int children
, struct nfp_qdisc
**qdisc
)
391 *qdisc
= nfp_abm_qdisc_find(alink
, handle
);
393 if (WARN_ON((*qdisc
)->type
!= type
))
398 *qdisc
= nfp_abm_qdisc_alloc(netdev
, alink
, type
, parent_handle
, handle
,
400 return *qdisc
? 0 : -ENOMEM
;
404 nfp_abm_qdisc_destroy(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
407 struct nfp_qdisc
*qdisc
;
409 qdisc
= nfp_abm_qdisc_find(alink
, handle
);
413 /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
414 if (alink
->root_qdisc
== qdisc
)
417 nfp_abm_qdisc_unlink_children(qdisc
, 0, qdisc
->num_children
);
418 nfp_abm_qdisc_free(netdev
, alink
, qdisc
);
420 if (alink
->root_qdisc
== qdisc
) {
421 alink
->root_qdisc
= NULL
;
422 /* Only root change matters, other changes are acted upon on
423 * the graft notification.
425 nfp_abm_qdisc_offload_update(alink
);
430 nfp_abm_qdisc_graft(struct nfp_abm_link
*alink
, u32 handle
, u32 child_handle
,
433 struct nfp_qdisc
*parent
, *child
;
435 parent
= nfp_abm_qdisc_find(alink
, handle
);
439 if (WARN(id
>= parent
->num_children
,
440 "graft child out of bound %d >= %d\n",
441 id
, parent
->num_children
))
444 nfp_abm_qdisc_unlink_children(parent
, id
, id
+ 1);
446 child
= nfp_abm_qdisc_find(alink
, child_handle
);
450 child
= NFP_QDISC_UNTRACKED
;
451 parent
->children
[id
] = child
;
453 nfp_abm_qdisc_offload_update(alink
);
459 nfp_abm_stats_calculate(struct nfp_alink_stats
*new,
460 struct nfp_alink_stats
*old
,
461 struct gnet_stats_basic_packed
*bstats
,
462 struct gnet_stats_queue
*qstats
)
464 _bstats_update(bstats
, new->tx_bytes
- old
->tx_bytes
,
465 new->tx_pkts
- old
->tx_pkts
);
466 qstats
->qlen
+= new->backlog_pkts
- old
->backlog_pkts
;
467 qstats
->backlog
+= new->backlog_bytes
- old
->backlog_bytes
;
468 qstats
->overlimits
+= new->overlimits
- old
->overlimits
;
469 qstats
->drops
+= new->drops
- old
->drops
;
473 nfp_abm_stats_red_calculate(struct nfp_alink_xstats
*new,
474 struct nfp_alink_xstats
*old
,
475 struct red_stats
*stats
)
477 stats
->forced_mark
+= new->ecn_marked
- old
->ecn_marked
;
478 stats
->pdrop
+= new->pdrop
- old
->pdrop
;
482 nfp_abm_gred_stats(struct nfp_abm_link
*alink
, u32 handle
,
483 struct tc_gred_qopt_offload_stats
*stats
)
485 struct nfp_qdisc
*qdisc
;
488 nfp_abm_stats_update(alink
);
490 qdisc
= nfp_abm_qdisc_find(alink
, handle
);
493 /* If the qdisc offload has stopped we may need to adjust the backlog
494 * counters back so carry on even if qdisc is not currently offloaded.
497 for (i
= 0; i
< qdisc
->red
.num_bands
; i
++) {
498 if (!stats
->xstats
[i
])
501 nfp_abm_stats_calculate(&qdisc
->red
.band
[i
].stats
,
502 &qdisc
->red
.band
[i
].prev_stats
,
503 &stats
->bstats
[i
], &stats
->qstats
[i
]);
504 qdisc
->red
.band
[i
].prev_stats
= qdisc
->red
.band
[i
].stats
;
506 nfp_abm_stats_red_calculate(&qdisc
->red
.band
[i
].xstats
,
507 &qdisc
->red
.band
[i
].prev_xstats
,
509 qdisc
->red
.band
[i
].prev_xstats
= qdisc
->red
.band
[i
].xstats
;
512 return qdisc
->offloaded
? 0 : -EOPNOTSUPP
;
516 nfp_abm_gred_check_params(struct nfp_abm_link
*alink
,
517 struct tc_gred_qopt_offload
*opt
)
519 struct nfp_cpp
*cpp
= alink
->abm
->app
->cpp
;
520 struct nfp_abm
*abm
= alink
->abm
;
523 if (opt
->set
.grio_on
|| opt
->set
.wred_on
) {
524 nfp_warn(cpp
, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
525 opt
->parent
, opt
->handle
);
528 if (opt
->set
.dp_def
!= alink
->def_band
) {
529 nfp_warn(cpp
, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
530 alink
->def_band
, opt
->parent
, opt
->handle
);
533 if (opt
->set
.dp_cnt
!= abm
->num_bands
) {
534 nfp_warn(cpp
, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
535 abm
->num_bands
, opt
->parent
, opt
->handle
);
539 for (i
= 0; i
< abm
->num_bands
; i
++) {
540 struct tc_gred_vq_qopt_offload_params
*band
= &opt
->set
.tab
[i
];
544 if (!band
->is_ecn
&& !nfp_abm_has_drop(abm
)) {
545 nfp_warn(cpp
, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
546 opt
->parent
, opt
->handle
, i
);
549 if (band
->is_ecn
&& !nfp_abm_has_mark(abm
)) {
550 nfp_warn(cpp
, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
551 opt
->parent
, opt
->handle
, i
);
554 if (band
->is_harddrop
) {
555 nfp_warn(cpp
, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
556 opt
->parent
, opt
->handle
, i
);
559 if (band
->min
!= band
->max
) {
560 nfp_warn(cpp
, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
561 opt
->parent
, opt
->handle
, i
);
564 if (band
->min
> S32_MAX
) {
565 nfp_warn(cpp
, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
566 band
->min
, S32_MAX
, opt
->parent
, opt
->handle
,
576 nfp_abm_gred_replace(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
577 struct tc_gred_qopt_offload
*opt
)
579 struct nfp_qdisc
*qdisc
;
583 ret
= nfp_abm_qdisc_replace(netdev
, alink
, NFP_QDISC_GRED
, opt
->parent
,
584 opt
->handle
, 0, &qdisc
);
588 qdisc
->params_ok
= nfp_abm_gred_check_params(alink
, opt
);
589 if (qdisc
->params_ok
) {
590 qdisc
->red
.num_bands
= opt
->set
.dp_cnt
;
591 for (i
= 0; i
< qdisc
->red
.num_bands
; i
++) {
592 qdisc
->red
.band
[i
].ecn
= opt
->set
.tab
[i
].is_ecn
;
593 qdisc
->red
.band
[i
].threshold
= opt
->set
.tab
[i
].min
;
598 nfp_abm_qdisc_offload_update(alink
);
603 int nfp_abm_setup_tc_gred(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
604 struct tc_gred_qopt_offload
*opt
)
606 switch (opt
->command
) {
607 case TC_GRED_REPLACE
:
608 return nfp_abm_gred_replace(netdev
, alink
, opt
);
609 case TC_GRED_DESTROY
:
610 nfp_abm_qdisc_destroy(netdev
, alink
, opt
->handle
);
613 return nfp_abm_gred_stats(alink
, opt
->handle
, &opt
->stats
);
620 nfp_abm_red_xstats(struct nfp_abm_link
*alink
, struct tc_red_qopt_offload
*opt
)
622 struct nfp_qdisc
*qdisc
;
624 nfp_abm_stats_update(alink
);
626 qdisc
= nfp_abm_qdisc_find(alink
, opt
->handle
);
627 if (!qdisc
|| !qdisc
->offloaded
)
630 nfp_abm_stats_red_calculate(&qdisc
->red
.band
[0].xstats
,
631 &qdisc
->red
.band
[0].prev_xstats
,
633 qdisc
->red
.band
[0].prev_xstats
= qdisc
->red
.band
[0].xstats
;
638 nfp_abm_red_stats(struct nfp_abm_link
*alink
, u32 handle
,
639 struct tc_qopt_offload_stats
*stats
)
641 struct nfp_qdisc
*qdisc
;
643 nfp_abm_stats_update(alink
);
645 qdisc
= nfp_abm_qdisc_find(alink
, handle
);
648 /* If the qdisc offload has stopped we may need to adjust the backlog
649 * counters back so carry on even if qdisc is not currently offloaded.
652 nfp_abm_stats_calculate(&qdisc
->red
.band
[0].stats
,
653 &qdisc
->red
.band
[0].prev_stats
,
654 stats
->bstats
, stats
->qstats
);
655 qdisc
->red
.band
[0].prev_stats
= qdisc
->red
.band
[0].stats
;
657 return qdisc
->offloaded
? 0 : -EOPNOTSUPP
;
661 nfp_abm_red_check_params(struct nfp_abm_link
*alink
,
662 struct tc_red_qopt_offload
*opt
)
664 struct nfp_cpp
*cpp
= alink
->abm
->app
->cpp
;
665 struct nfp_abm
*abm
= alink
->abm
;
667 if (!opt
->set
.is_ecn
&& !nfp_abm_has_drop(abm
)) {
668 nfp_warn(cpp
, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
669 opt
->parent
, opt
->handle
);
672 if (opt
->set
.is_ecn
&& !nfp_abm_has_mark(abm
)) {
673 nfp_warn(cpp
, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
674 opt
->parent
, opt
->handle
);
677 if (opt
->set
.is_harddrop
) {
678 nfp_warn(cpp
, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
679 opt
->parent
, opt
->handle
);
682 if (opt
->set
.min
!= opt
->set
.max
) {
683 nfp_warn(cpp
, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
684 opt
->parent
, opt
->handle
);
687 if (opt
->set
.min
> NFP_ABM_LVL_INFINITY
) {
688 nfp_warn(cpp
, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
689 opt
->set
.min
, NFP_ABM_LVL_INFINITY
, opt
->parent
,
698 nfp_abm_red_replace(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
699 struct tc_red_qopt_offload
*opt
)
701 struct nfp_qdisc
*qdisc
;
704 ret
= nfp_abm_qdisc_replace(netdev
, alink
, NFP_QDISC_RED
, opt
->parent
,
705 opt
->handle
, 1, &qdisc
);
709 /* If limit != 0 child gets reset */
710 if (opt
->set
.limit
) {
711 if (nfp_abm_qdisc_child_valid(qdisc
, 0))
712 qdisc
->children
[0]->use_cnt
--;
713 qdisc
->children
[0] = NULL
;
715 /* Qdisc was just allocated without a limit will use noop_qdisc,
719 qdisc
->children
[0] = NFP_QDISC_UNTRACKED
;
722 qdisc
->params_ok
= nfp_abm_red_check_params(alink
, opt
);
723 if (qdisc
->params_ok
) {
724 qdisc
->red
.num_bands
= 1;
725 qdisc
->red
.band
[0].ecn
= opt
->set
.is_ecn
;
726 qdisc
->red
.band
[0].threshold
= opt
->set
.min
;
729 if (qdisc
->use_cnt
== 1)
730 nfp_abm_qdisc_offload_update(alink
);
735 int nfp_abm_setup_tc_red(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
736 struct tc_red_qopt_offload
*opt
)
738 switch (opt
->command
) {
740 return nfp_abm_red_replace(netdev
, alink
, opt
);
742 nfp_abm_qdisc_destroy(netdev
, alink
, opt
->handle
);
745 return nfp_abm_red_stats(alink
, opt
->handle
, &opt
->stats
);
747 return nfp_abm_red_xstats(alink
, opt
);
749 return nfp_abm_qdisc_graft(alink
, opt
->handle
,
750 opt
->child_handle
, 0);
757 nfp_abm_mq_create(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
758 struct tc_mq_qopt_offload
*opt
)
760 struct nfp_qdisc
*qdisc
;
763 ret
= nfp_abm_qdisc_replace(netdev
, alink
, NFP_QDISC_MQ
,
764 TC_H_ROOT
, opt
->handle
, alink
->total_queues
,
769 qdisc
->params_ok
= true;
770 qdisc
->offloaded
= true;
771 nfp_abm_qdisc_offload_update(alink
);
776 nfp_abm_mq_stats(struct nfp_abm_link
*alink
, u32 handle
,
777 struct tc_qopt_offload_stats
*stats
)
779 struct nfp_qdisc
*qdisc
, *red
;
782 qdisc
= nfp_abm_qdisc_find(alink
, handle
);
786 nfp_abm_stats_update(alink
);
788 /* MQ stats are summed over the children in the core, so we need
789 * to add up the unreported child values.
791 memset(&qdisc
->mq
.stats
, 0, sizeof(qdisc
->mq
.stats
));
792 memset(&qdisc
->mq
.prev_stats
, 0, sizeof(qdisc
->mq
.prev_stats
));
794 for (i
= 0; i
< qdisc
->num_children
; i
++) {
795 if (!nfp_abm_qdisc_child_valid(qdisc
, i
))
798 if (!nfp_abm_qdisc_is_red(qdisc
->children
[i
]))
800 red
= qdisc
->children
[i
];
802 for (j
= 0; j
< red
->red
.num_bands
; j
++) {
803 nfp_abm_stats_propagate(&qdisc
->mq
.stats
,
804 &red
->red
.band
[j
].stats
);
805 nfp_abm_stats_propagate(&qdisc
->mq
.prev_stats
,
806 &red
->red
.band
[j
].prev_stats
);
810 nfp_abm_stats_calculate(&qdisc
->mq
.stats
, &qdisc
->mq
.prev_stats
,
811 stats
->bstats
, stats
->qstats
);
813 return qdisc
->offloaded
? 0 : -EOPNOTSUPP
;
816 int nfp_abm_setup_tc_mq(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
817 struct tc_mq_qopt_offload
*opt
)
819 switch (opt
->command
) {
821 return nfp_abm_mq_create(netdev
, alink
, opt
);
823 nfp_abm_qdisc_destroy(netdev
, alink
, opt
->handle
);
826 return nfp_abm_mq_stats(alink
, opt
->handle
, &opt
->stats
);
828 return nfp_abm_qdisc_graft(alink
, opt
->handle
,
829 opt
->graft_params
.child_handle
,
830 opt
->graft_params
.queue
);
836 int nfp_abm_setup_root(struct net_device
*netdev
, struct nfp_abm_link
*alink
,
837 struct tc_root_qopt_offload
*opt
)
841 if (alink
->root_qdisc
)
842 alink
->root_qdisc
->use_cnt
--;
843 alink
->root_qdisc
= nfp_abm_qdisc_find(alink
, opt
->handle
);
844 if (alink
->root_qdisc
)
845 alink
->root_qdisc
->use_cnt
++;
847 nfp_abm_qdisc_offload_update(alink
);