/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
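/* Worked example of the mapping above, assuming the usual strict-priority
 * setup where a higher-numbered traffic class is scheduled first: prio band 0
 * (the highest-priority band) maps to tclass 7 and band 7 maps to tclass 0
 * when IEEE_8021QAZ_MAX_TCS is 8, so the most important band ends up on the
 * traffic class the hardware serves first.
 */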
enum mlxsw_sp_qdisc_type {
        MLXSW_SP_QDISC_NO_QDISC,
        MLXSW_SP_QDISC_RED,
        MLXSW_SP_QDISC_PRIO,
};
struct mlxsw_sp_qdisc_ops {
        enum mlxsw_sp_qdisc_type type;
        int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params);
        int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
        int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr);
        int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr);
        void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        /* unoffload - to be used for a qdisc that stops being offloaded
         * without being destroyed.
         */
        void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};
struct mlxsw_sp_qdisc {
        u32 handle;
        u8 tclass_num;
        union {
                struct red_stats red;
        } xstats_base;
        struct mlxsw_sp_qdisc_stats {
                u64 tx_bytes;
                u64 tx_packets;
                u64 drops;
                u64 overlimits;
                u64 backlog;
        } stats_base;
        struct mlxsw_sp_qdisc_ops *ops;
};
static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
                       enum mlxsw_sp_qdisc_type type)
{
        return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
               mlxsw_sp_qdisc->ops->type == type &&
               mlxsw_sp_qdisc->handle == handle;
}
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int err = 0;

        if (!mlxsw_sp_qdisc)
                return 0;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
                err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
                                                   mlxsw_sp_qdisc);

        mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
        mlxsw_sp_qdisc->ops = NULL;
        return err;
}
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
        int err;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
                /* In case this location contained a different qdisc of the
                 * same type we can override the old qdisc configuration.
                 * Otherwise, we need to remove the old qdisc before setting
                 * the new one.
                 */
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

        err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_bad_param;

        err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_config;

        if (mlxsw_sp_qdisc->handle != handle) {
                mlxsw_sp_qdisc->ops = ops;
                if (ops->clean_stats)
                        ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
        }

        mlxsw_sp_qdisc->handle = handle;
        return 0;

err_bad_param:
err_config:
        if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
                ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

        mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        return err;
}
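/* A concrete walk-through of the flow above, with hypothetical handles: if
 * the root location already holds RED with handle 1: and a replace arrives
 * for RED with handle 2:, the type matches, so the hardware configuration is
 * simply overwritten and clean_stats() re-bases the statistics for the new
 * handle. If instead a PRIO replace arrives, the types differ, so the RED
 * qdisc is destroyed first and the location is programmed from scratch.
 */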
static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_stats)
                return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
                                                      mlxsw_sp_qdisc,
                                                      stats_ptr);
        else
                return -EOPNOTSUPP;
}
static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_xstats)
                return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
                                                       mlxsw_sp_qdisc,
                                                       xstats_ptr);
        else
                return -EOPNOTSUPP;
}
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                  int tclass_num, u32 min, u32 max,
                                  u32 probability, bool is_ecn)
{
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
        char cwtp_cmd[MLXSW_REG_CWTP_LEN];
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
        mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
                                    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
                                    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
                                    probability);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
        if (err)
                return err;

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
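/* Summary of the two register writes above, as suggested by the packing
 * helpers: the CWTP write programs the WRED/ECN profile for the traffic
 * class (min/max thresholds rounded up to MLXSW_REG_CWTP_MIN_VALUE
 * granularity, plus the mark/drop probability), and the CWTPM write then
 * enables that profile on the class, with is_ecn selecting ECN marking
 * rather than early drop.
 */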
static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                   int tclass_num)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        struct red_stats *red_base;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;
        red_base = &mlxsw_sp_qdisc->xstats_base.red;

        stats_base->tx_packets = stats->tx_packets;
        stats_base->tx_bytes = stats->tx_bytes;

        red_base->prob_mark = xstats->ecn;
        red_base->prob_drop = xstats->wred_drop[tclass_num];
        red_base->pdrop = xstats->tail_drop[tclass_num];

        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;

        stats_base->backlog = 0;
}
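/* The "base" values recorded above act as a snapshot: the port's periodic
 * hardware counters keep running across qdisc replace operations, so later
 * stats queries report the difference between the current counters and this
 * base rather than the raw lifetime hardware values.
 */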
static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
                                                  mlxsw_sp_qdisc->tclass_num);
}
static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;

        if (p->min > p->max) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: min %u is bigger than max %u\n",
                        p->min, p->max);
                return -EINVAL;
        }
        if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: max value %u is too big\n", p->max);
                return -EINVAL;
        }
        if (p->min == 0 || p->max == 0) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: 0 value is illegal for min and max\n");
                return -EINVAL;
        }
        return 0;
}
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                           void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        u32 min, max;
        u64 prob;

        /* calculate probability in percentage */
        prob = p->probability;
        prob *= 100;
        prob = DIV_ROUND_UP(prob, 1 << 16);
        prob = DIV_ROUND_UP(prob, 1 << 16);
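        /* For example, assuming p->probability carries sch_red's max_P value,
         * i.e. the mark/drop probability as a 32-bit fixed-point fraction of
         * 2^32: a 50% probability arrives as 0x80000000, becomes
         * 0x80000000 * 100 = 214748364800, and the two round-up divisions by
         * 2^16 (2^32 in total) yield 50.
         */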
        min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
        max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
        return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
                                                 min, max, prob, p->is_ecn);
}
static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             void *params)
{
        struct tc_red_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
}
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *xstats_ptr)
{
        struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_port_xstats *xstats;
        struct red_stats *res = xstats_ptr;
        int early_drops, marks, pdrops;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

        early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
        marks = xstats->ecn - xstats_base->prob_mark;
        pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;

        res->pdrop += pdrops;
        res->prob_drop += early_drops;
        res->prob_mark += marks;

        xstats_base->pdrop += pdrops;
        xstats_base->prob_drop += early_drops;
        xstats_base->prob_mark += marks;
        return 0;
}
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, overlimits, drops, backlog;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
        tx_packets = stats->tx_packets - stats_base->tx_packets;
        overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
                     stats_base->overlimits;
        drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
                stats_base->drops;
        backlog = xstats->backlog[tclass_num];

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             backlog) -
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             stats_base->backlog);

        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->overlimits += overlimits;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
        .type = MLXSW_SP_QDISC_RED,
        .check_params = mlxsw_sp_qdisc_red_check_params,
        .replace = mlxsw_sp_qdisc_red_replace,
        .unoffload = mlxsw_sp_qdisc_red_unoffload,
        .destroy = mlxsw_sp_qdisc_red_destroy,
        .get_stats = mlxsw_sp_qdisc_get_red_stats,
        .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_red_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        if (p->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;

        if (p->command == TC_RED_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_red,
                                              &p->set);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_RED))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_RED_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_RED_XSTATS:
                return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                 p->xstats);
        case TC_RED_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        default:
                return -EOPNOTSUPP;
        }
}
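/* Illustrative usage (interface name and parameter values are hypothetical):
 * the offload path above only handles RED installed as the port's root
 * qdisc, e.g.
 *   tc qdisc replace dev swp1 root handle 1: \
 *           red limit 400000 min 30000 max 300000 avpkt 1000 burst 120 ecn
 * Anything not parented at the root is rejected with -EOPNOTSUPP.
 */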
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
                                          MLXSW_SP_PORT_DEFAULT_TCLASS);
        return 0;
}
static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                 void *params)
{
        struct tc_prio_qopt_offload_params *p = params;

        if (p->bands > IEEE_8021QAZ_MAX_TCS)
                return -EOPNOTSUPP;

        return 0;
}
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        int tclass, i;
        int err;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
                if (err)
                        return err;
        }

        return 0;
}
static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
}
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
        tx_packets = stats->tx_packets - stats_base->tx_packets;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                drops += xstats->tail_drop[i];
                backlog += xstats->backlog[i];
        }
        drops = drops - stats_base->drops;

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             backlog) -
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             stats_base->backlog);
        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}
static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        stats_base->tx_packets = stats->tx_packets;
        stats_base->tx_bytes = stats->tx_bytes;

        stats_base->drops = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                stats_base->drops += xstats->tail_drop[i];

        mlxsw_sp_qdisc->stats_base.backlog = 0;
}
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
        .type = MLXSW_SP_QDISC_PRIO,
        .check_params = mlxsw_sp_qdisc_prio_check_params,
        .replace = mlxsw_sp_qdisc_prio_replace,
        .unoffload = mlxsw_sp_qdisc_prio_unoffload,
        .destroy = mlxsw_sp_qdisc_prio_destroy,
        .get_stats = mlxsw_sp_qdisc_get_prio_stats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_prio_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        if (p->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
        if (p->command == TC_PRIO_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_prio,
                                              &p->replace_params);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_PRIO))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_PRIO_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_PRIO_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        default:
                return -EOPNOTSUPP;
        }
}
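/* Illustrative usage (interface name hypothetical): each prio band is mapped
 * to a hardware traffic class, so e.g.
 *   tc qdisc replace dev swp1 root handle 1: prio bands 8 \
 *           priomap 7 6 5 4 3 2 1 0 7 7 7 7 7 7 7 7
 * steers skb priority 0 to band 7 (the lowest-priority band), which
 * MLXSW_SP_PRIO_BAND_TO_TCLASS() in turn maps to traffic class 0.
 */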
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
                                            GFP_KERNEL);
        if (!mlxsw_sp_port->root_qdisc)
                return -ENOMEM;

        mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

        return 0;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
        kfree(mlxsw_sp_port->root_qdisc);
}