/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/dcbnl.h>
#include <linux/math64.h>

#include "mlx4_en.h"
#include "fw_qos.h"

enum {
        MLX4_CEE_STATE_DOWN = 0,
        MLX4_CEE_STATE_UP   = 1,
};

/* Definitions for QCN */
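/* The two structures below describe the mailbox layout consumed by the
 * CONGESTION_CONTROL firmware command for 802.1Qau reaction-point
 * parameters and statistics. All fields are big-endian as seen by the
 * device; the reserved members only pad the structures to the layout the
 * firmware expects.
 */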
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
        __be32 modify_enable_high;
        __be32 modify_enable_low;
        __be32 reserved1;
        __be32 extended_enable;
        __be32 rppp_max_rps;
        __be32 rpg_time_reset;
        __be32 rpg_byte_reset;
        __be32 rpg_threshold;
        __be32 rpg_max_rate;
        __be32 rpg_ai_rate;
        __be32 rpg_hai_rate;
        __be32 rpg_gd;
        __be32 rpg_min_dec_fac;
        __be32 rpg_min_rate;
        __be32 max_time_rise;
        __be32 max_byte_rise;
        __be32 max_qdelta;
        __be32 min_qoffset;
        __be32 gd_coefficient;
        __be32 reserved2[5];
        __be32 cp_sample_base;
        __be32 reserved3[39];
};

struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
        __be64 rppp_rp_centiseconds;
        __be32 reserved1;
        __be32 ignored_cnm;
        __be32 rppp_created_rps;
        __be32 estimated_total_rate;
        __be32 max_active_rate_limiter_index;
        __be32 dropped_cnms_busy_fw;
        __be32 reserved2;
        __be32 cnms_handled_successfully;
        __be32 min_total_limiters_rate;
        __be32 max_total_limiters_rate;
        __be32 reserved3[4];
};

static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        switch (capid) {
        case DCB_CAP_ATTR_PFC:
                *cap = true;
                break;
        case DCB_CAP_ATTR_DCBX:
                *cap = priv->dcbx_cap;
                break;
        case DCB_CAP_ATTR_PFC_TCS:
                *cap = 1 << mlx4_max_tc(priv->mdev->dev);
                break;
        default:
                *cap = false;
                break;
        }

        return 0;
}

static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        return priv->cee_config.pfc_state;
}

static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        priv->cee_config.pfc_state = state;
}

static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
                                      u8 *setting)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        *setting = priv->cee_config.dcb_pfc[priority];
}

static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
                                      u8 setting)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        priv->cee_config.dcb_pfc[priority] = setting;
        priv->cee_config.pfc_state = true;
}

static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);

        if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
                return -EINVAL;

        if (tcid == DCB_NUMTCS_ATTR_PFC)
                *num = mlx4_max_tc(priv->mdev->dev);
        else
                *num = 0;

        return 0;
}

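/* CEE "set all" hook: apply the cached per-priority PFC configuration to
 * the port. When PFC is configured for any priority, global pause is
 * turned off and the tx_ppp/rx_ppp priority bitmaps are programmed
 * instead; otherwise plain global pause is restored.
 */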
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct mlx4_en_dev *mdev = priv->mdev;

        if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;

        if (priv->cee_config.pfc_state) {
                int tc;

                priv->prof->rx_pause = 0;
                priv->prof->tx_pause = 0;
                for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
                        u8 tc_mask = 1 << tc;

                        switch (priv->cee_config.dcb_pfc[tc]) {
                        case pfc_disabled:
                                priv->prof->tx_ppp &= ~tc_mask;
                                priv->prof->rx_ppp &= ~tc_mask;
                                break;
                        case pfc_enabled_full:
                                priv->prof->tx_ppp |= tc_mask;
                                priv->prof->rx_ppp |= tc_mask;
                                break;
                        case pfc_enabled_tx:
                                priv->prof->tx_ppp |= tc_mask;
                                priv->prof->rx_ppp &= ~tc_mask;
                                break;
                        case pfc_enabled_rx:
                                priv->prof->tx_ppp &= ~tc_mask;
                                priv->prof->rx_ppp |= tc_mask;
                                break;
                        default:
                                break;
                        }
                }
                en_dbg(DRV, priv, "Set pfc on\n");
        } else {
                priv->prof->rx_pause = 1;
                priv->prof->tx_pause = 1;
                en_dbg(DRV, priv, "Set pfc off\n");
        }

        if (mlx4_SET_PORT_general(mdev->dev, priv->port,
                                  priv->rx_skb_size + ETH_FCS_LEN,
                                  priv->prof->tx_pause,
                                  priv->prof->tx_ppp,
                                  priv->prof->rx_pause,
                                  priv->prof->rx_ppp)) {
                en_err(priv, "Failed setting pause params\n");
                return 1;
        }

        return 0;
}

static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
                return MLX4_CEE_STATE_UP;

        return MLX4_CEE_STATE_DOWN;
}

static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int num_tcs = 0;

        if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;

        if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
                return 0;

        if (state) {
                priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
                num_tcs = IEEE_8021QAZ_MAX_TCS;
        } else {
                priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
        }

        if (mlx4_en_setup_tc(dev, num_tcs))
                return 1;

        return 0;
}

/* On success returns a non-zero 802.1p user priority bitmap,
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct dcb_app app = {
                                .selector = idtype,
                                .protocol = id,
                             };

        if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 0;

        return dcb_getapp(netdev, &app);
}

static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
                                u16 id, u8 up)
{
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct dcb_app app;

        if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return -EINVAL;

        memset(&app, 0, sizeof(struct dcb_app));
        app.selector = idtype;
        app.protocol = id;
        app.priority = up;

        return dcb_setapp(netdev, &app);
}

static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
                                     struct ieee_ets *ets)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct ieee_ets *my_ets = &priv->ets;

        ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
        ets->cbs = my_ets->cbs;
        memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
        memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
        memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));

        return 0;
}

static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
        int has_ets_tc = 0;
        int total_ets_bw = 0;
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
                        en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
                               i, ets->prio_tc[i]);
                        return -EINVAL;
                }

                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        has_ets_tc = 1;
                        total_ets_bw += ets->tc_tx_bw[i];
                        break;
                default:
                        en_err(priv, "TC[%d]: Not supported TSA: %d\n",
                               i, ets->tc_tsa[i]);
                        return -EOPNOTSUPP;
                }
        }

        if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
                en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
                       total_ets_bw);
                return -EINVAL;
        }

        return 0;
}

static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
                                         struct ieee_ets *ets, u16 *ratelimit)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int num_strict = 0;
        int i;
        __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
        __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };

        ets = ets ?: &priv->ets;
        ratelimit = ratelimit ?: priv->maxrate;

        /* higher TC means higher priority => lower pg */
        for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        pg[i] = num_strict++;
                        tc_tx_bw[i] = MLX4_EN_BW_MAX;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        pg[i] = MLX4_EN_TC_ETS;
                        tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
                        break;
                }
        }

        return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
                                       ratelimit);
}

static int
mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        err = mlx4_en_ets_validate(priv, ets);
        if (err)
                return err;

        err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
        if (err)
                return err;

        err = mlx4_en_config_port_scheduler(priv, ets, NULL);
        if (err)
                return err;

        memcpy(&priv->ets, ets, sizeof(priv->ets));

        return 0;
}

static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
                                     struct ieee_pfc *pfc)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
        pfc->pfc_en = priv->prof->tx_ppp;

        return 0;
}

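/* IEEE PFC: pfc->pfc_en is a per-priority bitmap. Whenever any priority
 * is PFC-enabled, global pause is switched off in both directions
 * (rx_pause/tx_pause are set to !pfc_en) and the bitmap is programmed
 * into tx_ppp/rx_ppp through mlx4_SET_PORT_general().
 */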
static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
                                     struct ieee_pfc *pfc)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_port_profile *prof = priv->prof;
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
               pfc->pfc_cap,
               pfc->pfc_en,
               pfc->mbc,
               pfc->delay);

        prof->rx_pause = !pfc->pfc_en;
        prof->tx_pause = !pfc->pfc_en;
        prof->rx_ppp = pfc->pfc_en;
        prof->tx_ppp = pfc->pfc_en;

        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
                                    prof->tx_pause,
                                    prof->tx_ppp,
                                    prof->rx_pause,
                                    prof->rx_ppp);
        if (err)
                en_err(priv, "Failed setting pause params\n");
        else
                mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
                                                prof->rx_ppp, prof->rx_pause,
                                                prof->tx_ppp, prof->tx_pause);

        return err;
}

static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        return priv->dcbx_cap;
}

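/* Only host-managed DCBX in exactly one version (IEEE or CEE, never both
 * at once, and never LLD-managed) is accepted. On a successful switch the
 * ETS, PFC and TC state is reset to defaults for the newly selected mode.
 */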
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct ieee_ets ets = {0};
        struct ieee_pfc pfc = {0};

        if (mode == priv->dcbx_cap)
                return 0;

        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
            ((mode & DCB_CAP_DCBX_VER_IEEE) &&
             (mode & DCB_CAP_DCBX_VER_CEE)) ||
            !(mode & DCB_CAP_DCBX_HOST))
                goto err;

        priv->dcbx_cap = mode;

        ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
        pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;

        if (mode & DCB_CAP_DCBX_VER_IEEE) {
                if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
                        goto err;
                if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
                        goto err;
        } else if (mode & DCB_CAP_DCBX_VER_CEE) {
                if (mlx4_en_dcbnl_set_all(dev))
                        goto err;
        } else {
                if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
                        goto err;
                if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
                        goto err;
                if (mlx4_en_setup_tc(dev, 0))
                        goto err;
        }

        return 0;
err:
        return 1;
}

#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
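/* The hardware rate limiter therefore works in 100000 Kbps (100 Mbps)
 * steps: for example, a requested 250000 Kbps maxrate rounds up to
 * 3 HW units and reads back as 300000 Kbps, while 0 still means
 * "unlimited". (Worked example only; the conversion itself is done in
 * mlx4_en_dcbnl_ieee_setmaxrate() below.)
 */
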
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
                                         struct ieee_maxrate *maxrate)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                maxrate->tc_maxrate[i] =
                        priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;

        return 0;
}

*dev
,
499 struct ieee_maxrate
*maxrate
)
501 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
502 u16 tmp
[IEEE_8021QAZ_MAX_TCS
];
505 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
506 /* Convert from Kbps into HW units, rounding result up.
507 * Setting to 0, means unlimited BW.
509 tmp
[i
] = div_u64(maxrate
->tc_maxrate
[i
] +
510 MLX4_RATELIMIT_UNITS_IN_KB
- 1,
511 MLX4_RATELIMIT_UNITS_IN_KB
);
514 err
= mlx4_en_config_port_scheduler(priv
, NULL
, tmp
);
518 memcpy(priv
->maxrate
, tmp
, sizeof(priv
->maxrate
));
#define RPG_ENABLE_BIT  31
#define CN_TAG_BIT      30

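/* Bit 31 of the extended_enable word carries the per-priority rpg_enable
 * flag, and bit 30 is set when the CNDD state machine reports
 * DCB_CNDD_INTERIOR_READY (CN-tagging). The in_modifier passed to the
 * CONGESTION_CONTROL command encodes the port in the low byte, a one-hot
 * priority mask in bits 8-15 and the algorithm selector
 * (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT) from bit 16, as built in the
 * loops below.
 */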
static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
                                     struct ieee_qcn *qcn)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
        struct mlx4_cmd_mailbox *mailbox_out = NULL;
        u64 mailbox_in_dma = 0;
        u32 inmod = 0;
        int i, err;

        if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
                return -EOPNOTSUPP;

        mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
        if (IS_ERR(mailbox_out))
                return -ENOMEM;
        hw_qcn =
        (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
        mailbox_out->buf;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                inmod = priv->port | ((1 << i) << 8) |
                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
                err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
                                   mailbox_out->dma,
                                   inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
                                   MLX4_CMD_CONGESTION_CTRL_OPCODE,
                                   MLX4_CMD_TIME_CLASS_C,
                                   MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
                        return err;
                }

                qcn->rpg_enable[i] =
                        be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
                qcn->rppp_max_rps[i] =
                        be32_to_cpu(hw_qcn->rppp_max_rps);
                qcn->rpg_time_reset[i] =
                        be32_to_cpu(hw_qcn->rpg_time_reset);
                qcn->rpg_byte_reset[i] =
                        be32_to_cpu(hw_qcn->rpg_byte_reset);
                qcn->rpg_threshold[i] =
                        be32_to_cpu(hw_qcn->rpg_threshold);
                qcn->rpg_max_rate[i] =
                        be32_to_cpu(hw_qcn->rpg_max_rate);
                qcn->rpg_ai_rate[i] =
                        be32_to_cpu(hw_qcn->rpg_ai_rate);
                qcn->rpg_hai_rate[i] =
                        be32_to_cpu(hw_qcn->rpg_hai_rate);
                qcn->rpg_gd[i] =
                        be32_to_cpu(hw_qcn->rpg_gd);
                qcn->rpg_min_dec_fac[i] =
                        be32_to_cpu(hw_qcn->rpg_min_dec_fac);
                qcn->rpg_min_rate[i] =
                        be32_to_cpu(hw_qcn->rpg_min_rate);
                qcn->cndd_state_machine[i] =
                        priv->cndd_state[i];
        }
        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
        return 0;
}

static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
                                     struct ieee_qcn *qcn)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
        struct mlx4_cmd_mailbox *mailbox_in = NULL;
        u64 mailbox_in_dma = 0;
        u32 inmod = 0;
        int i, err;
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000

        if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
                return -EOPNOTSUPP;

        mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
        if (IS_ERR(mailbox_in))
                return -ENOMEM;

        mailbox_in_dma = mailbox_in->dma;
        hw_qcn =
        (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                inmod = priv->port | ((1 << i) << 8) |
                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);

                /* Before updating a QCN parameter,
                 * its modify-enable bit needs to be set to 1.
                 */
                hw_qcn->modify_enable_high = cpu_to_be32(
                                                MODIFY_ENABLE_HIGH_MASK);
                hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);

                hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
                hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
                hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
                hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
                hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
                hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
                hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
                hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
                hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
                hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
                hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
                priv->cndd_state[i] = qcn->cndd_state_machine[i];
                if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
                        hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);

                err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
                               MLX4_CONGESTION_CONTROL_SET_PARAMS,
                               MLX4_CMD_CONGESTION_CTRL_OPCODE,
                               MLX4_CMD_TIME_CLASS_C,
                               MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
                        return err;
                }
        }
        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
        return 0;
}

static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
                                          struct ieee_qcn_stats *qcn_stats)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
        struct mlx4_cmd_mailbox *mailbox_out = NULL;
        u64 mailbox_in_dma = 0;
        u32 inmod = 0;
        int i, err;

        if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
                return -EOPNOTSUPP;

        mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
        if (IS_ERR(mailbox_out))
                return -ENOMEM;

        hw_qcn_stats =
        (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
        mailbox_out->buf;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                inmod = priv->port | ((1 << i) << 8) |
                        (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
                err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
                                   mailbox_out->dma, inmod,
                                   MLX4_CONGESTION_CONTROL_GET_STATISTICS,
                                   MLX4_CMD_CONGESTION_CTRL_OPCODE,
                                   MLX4_CMD_TIME_CLASS_C,
                                   MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
                        return err;
                }
                qcn_stats->rppp_rp_centiseconds[i] =
                        be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
                qcn_stats->rppp_created_rps[i] =
                        be32_to_cpu(hw_qcn_stats->rppp_created_rps);
        }
        mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
        return 0;
}

const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
        .ieee_getets            = mlx4_en_dcbnl_ieee_getets,
        .ieee_setets            = mlx4_en_dcbnl_ieee_setets,
        .ieee_getmaxrate        = mlx4_en_dcbnl_ieee_getmaxrate,
        .ieee_setmaxrate        = mlx4_en_dcbnl_ieee_setmaxrate,
        .ieee_getqcn            = mlx4_en_dcbnl_ieee_getqcn,
        .ieee_setqcn            = mlx4_en_dcbnl_ieee_setqcn,
        .ieee_getqcnstats       = mlx4_en_dcbnl_ieee_getqcnstats,
        .ieee_getpfc            = mlx4_en_dcbnl_ieee_getpfc,
        .ieee_setpfc            = mlx4_en_dcbnl_ieee_setpfc,

        .getstate       = mlx4_en_dcbnl_get_state,
        .setstate       = mlx4_en_dcbnl_set_state,
        .getpfccfg      = mlx4_en_dcbnl_get_pfc_cfg,
        .setpfccfg      = mlx4_en_dcbnl_set_pfc_cfg,
        .setall         = mlx4_en_dcbnl_set_all,
        .getcap         = mlx4_en_dcbnl_getcap,
        .getnumtcs      = mlx4_en_dcbnl_getnumtcs,
        .getpfcstate    = mlx4_en_dcbnl_getpfcstate,
        .setpfcstate    = mlx4_en_dcbnl_setpfcstate,
        .getapp         = mlx4_en_dcbnl_getapp,
        .setapp         = mlx4_en_dcbnl_setapp,

        .getdcbx        = mlx4_en_dcbnl_getdcbx,
        .setdcbx        = mlx4_en_dcbnl_setdcbx,
};

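/* Reduced operation set for devices that support PFC but not the full ETS
 * port scheduler; presumably registered instead of mlx4_en_dcbnl_ops when
 * the ETS capability is absent (the selection happens outside this file).
 */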
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
        .ieee_getpfc    = mlx4_en_dcbnl_ieee_getpfc,
        .ieee_setpfc    = mlx4_en_dcbnl_ieee_setpfc,

        .setstate       = mlx4_en_dcbnl_set_state,
        .getpfccfg      = mlx4_en_dcbnl_get_pfc_cfg,
        .setpfccfg      = mlx4_en_dcbnl_set_pfc_cfg,
        .setall         = mlx4_en_dcbnl_set_all,
        .getnumtcs      = mlx4_en_dcbnl_getnumtcs,
        .getpfcstate    = mlx4_en_dcbnl_getpfcstate,
        .setpfcstate    = mlx4_en_dcbnl_setpfcstate,
        .getapp         = mlx4_en_dcbnl_getapp,
        .setapp         = mlx4_en_dcbnl_setapp,

        .getdcbx        = mlx4_en_dcbnl_getdcbx,
        .setdcbx        = mlx4_en_dcbnl_setdcbx,
};