/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/dcbnl.h>
35 #include <linux/math64.h>
/* CEE DCBX operational state, as reported by get_state()/set_state() */
enum mlx4_cee_state {
	MLX4_CEE_STATE_DOWN	= 0,
	MLX4_CEE_STATE_UP	= 1,
};
/* Definitions for QCN
 */
48 struct mlx4_congestion_control_mb_prio_802_1_qau_params
{
49 __be32 modify_enable_high
;
50 __be32 modify_enable_low
;
52 __be32 extended_enable
;
54 __be32 rpg_time_reset
;
55 __be32 rpg_byte_reset
;
61 __be32 rpg_min_dec_fac
;
67 __be32 gd_coefficient
;
69 __be32 cp_sample_base
;
73 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics
{
74 __be64 rppp_rp_centiseconds
;
77 __be32 rppp_created_rps
;
78 __be32 estimated_total_rate
;
79 __be32 max_active_rate_limiter_index
;
80 __be32 dropped_cnms_busy_fw
;
82 __be32 cnms_handled_successfully
;
83 __be32 min_total_limiters_rate
;
84 __be32 max_total_limiters_rate
;
88 static u8
mlx4_en_dcbnl_getcap(struct net_device
*dev
, int capid
, u8
*cap
)
90 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
93 case DCB_CAP_ATTR_PFC
:
96 case DCB_CAP_ATTR_DCBX
:
97 *cap
= priv
->dcbx_cap
;
99 case DCB_CAP_ATTR_PFC_TCS
:
100 *cap
= 1 << mlx4_max_tc(priv
->mdev
->dev
);
110 static u8
mlx4_en_dcbnl_getpfcstate(struct net_device
*netdev
)
112 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
114 return priv
->cee_config
.pfc_state
;
117 static void mlx4_en_dcbnl_setpfcstate(struct net_device
*netdev
, u8 state
)
119 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
121 priv
->cee_config
.pfc_state
= state
;
124 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device
*netdev
, int priority
,
127 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
129 *setting
= priv
->cee_config
.dcb_pfc
[priority
];
132 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device
*netdev
, int priority
,
135 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
137 priv
->cee_config
.dcb_pfc
[priority
] = setting
;
138 priv
->cee_config
.pfc_state
= true;
141 static int mlx4_en_dcbnl_getnumtcs(struct net_device
*netdev
, int tcid
, u8
*num
)
143 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
145 if (!(priv
->flags
& MLX4_EN_FLAG_DCB_ENABLED
))
148 if (tcid
== DCB_NUMTCS_ATTR_PFC
)
149 *num
= mlx4_max_tc(priv
->mdev
->dev
);
156 static u8
mlx4_en_dcbnl_set_all(struct net_device
*netdev
)
158 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
159 struct mlx4_en_port_profile
*prof
= priv
->prof
;
160 struct mlx4_en_dev
*mdev
= priv
->mdev
;
161 u8 tx_pause
, tx_ppp
, rx_pause
, rx_ppp
;
163 if (!(priv
->dcbx_cap
& DCB_CAP_DCBX_VER_CEE
))
166 if (priv
->cee_config
.pfc_state
) {
168 rx_ppp
= prof
->rx_ppp
;
169 tx_ppp
= prof
->tx_ppp
;
171 for (tc
= 0; tc
< CEE_DCBX_MAX_PRIO
; tc
++) {
172 u8 tc_mask
= 1 << tc
;
174 switch (priv
->cee_config
.dcb_pfc
[tc
]) {
179 case pfc_enabled_full
:
195 rx_pause
= !!(rx_ppp
|| tx_ppp
) ? 0 : prof
->rx_pause
;
196 tx_pause
= !!(rx_ppp
|| tx_ppp
) ? 0 : prof
->tx_pause
;
200 rx_pause
= prof
->rx_pause
;
201 tx_pause
= prof
->tx_pause
;
204 if (mlx4_SET_PORT_general(mdev
->dev
, priv
->port
,
205 priv
->rx_skb_size
+ ETH_FCS_LEN
,
206 tx_pause
, tx_ppp
, rx_pause
, rx_ppp
)) {
207 en_err(priv
, "Failed setting pause params\n");
211 prof
->tx_ppp
= tx_ppp
;
212 prof
->rx_ppp
= rx_ppp
;
213 prof
->tx_pause
= tx_pause
;
214 prof
->rx_pause
= rx_pause
;
219 static u8
mlx4_en_dcbnl_get_state(struct net_device
*dev
)
221 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
223 if (priv
->flags
& MLX4_EN_FLAG_DCB_ENABLED
)
224 return MLX4_CEE_STATE_UP
;
226 return MLX4_CEE_STATE_DOWN
;
229 static u8
mlx4_en_dcbnl_set_state(struct net_device
*dev
, u8 state
)
231 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
234 if (!(priv
->dcbx_cap
& DCB_CAP_DCBX_VER_CEE
))
237 if (!!(state
) == !!(priv
->flags
& MLX4_EN_FLAG_DCB_ENABLED
))
241 priv
->flags
|= MLX4_EN_FLAG_DCB_ENABLED
;
242 num_tcs
= IEEE_8021QAZ_MAX_TCS
;
244 priv
->flags
&= ~MLX4_EN_FLAG_DCB_ENABLED
;
247 if (mlx4_en_alloc_tx_queue_per_tc(dev
, num_tcs
))
253 /* On success returns a non-zero 802.1p user priority bitmap
254 * otherwise returns 0 as the invalid user priority bitmap to
257 static int mlx4_en_dcbnl_getapp(struct net_device
*netdev
, u8 idtype
, u16 id
)
259 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
260 struct dcb_app app
= {
264 if (!(priv
->dcbx_cap
& DCB_CAP_DCBX_VER_CEE
))
267 return dcb_getapp(netdev
, &app
);
270 static int mlx4_en_dcbnl_setapp(struct net_device
*netdev
, u8 idtype
,
273 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
276 if (!(priv
->dcbx_cap
& DCB_CAP_DCBX_VER_CEE
))
279 memset(&app
, 0, sizeof(struct dcb_app
));
280 app
.selector
= idtype
;
284 return dcb_setapp(netdev
, &app
);
287 static int mlx4_en_dcbnl_ieee_getets(struct net_device
*dev
,
288 struct ieee_ets
*ets
)
290 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
291 struct ieee_ets
*my_ets
= &priv
->ets
;
296 ets
->ets_cap
= IEEE_8021QAZ_MAX_TCS
;
297 ets
->cbs
= my_ets
->cbs
;
298 memcpy(ets
->tc_tx_bw
, my_ets
->tc_tx_bw
, sizeof(ets
->tc_tx_bw
));
299 memcpy(ets
->tc_tsa
, my_ets
->tc_tsa
, sizeof(ets
->tc_tsa
));
300 memcpy(ets
->prio_tc
, my_ets
->prio_tc
, sizeof(ets
->prio_tc
));
305 static int mlx4_en_ets_validate(struct mlx4_en_priv
*priv
, struct ieee_ets
*ets
)
308 int total_ets_bw
= 0;
311 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
312 if (ets
->prio_tc
[i
] >= MLX4_EN_NUM_UP_HIGH
) {
313 en_err(priv
, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
318 switch (ets
->tc_tsa
[i
]) {
319 case IEEE_8021QAZ_TSA_VENDOR
:
320 case IEEE_8021QAZ_TSA_STRICT
:
322 case IEEE_8021QAZ_TSA_ETS
:
324 total_ets_bw
+= ets
->tc_tx_bw
[i
];
327 en_err(priv
, "TC[%d]: Not supported TSA: %d\n",
333 if (has_ets_tc
&& total_ets_bw
!= MLX4_EN_BW_MAX
) {
334 en_err(priv
, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
342 static int mlx4_en_config_port_scheduler(struct mlx4_en_priv
*priv
,
343 struct ieee_ets
*ets
, u16
*ratelimit
)
345 struct mlx4_en_dev
*mdev
= priv
->mdev
;
348 __u8 tc_tx_bw
[IEEE_8021QAZ_MAX_TCS
] = { 0 };
349 __u8 pg
[IEEE_8021QAZ_MAX_TCS
] = { 0 };
351 ets
= ets
?: &priv
->ets
;
352 ratelimit
= ratelimit
?: priv
->maxrate
;
354 /* higher TC means higher priority => lower pg */
355 for (i
= IEEE_8021QAZ_MAX_TCS
- 1; i
>= 0; i
--) {
356 switch (ets
->tc_tsa
[i
]) {
357 case IEEE_8021QAZ_TSA_VENDOR
:
358 pg
[i
] = MLX4_EN_TC_VENDOR
;
359 tc_tx_bw
[i
] = MLX4_EN_BW_MAX
;
361 case IEEE_8021QAZ_TSA_STRICT
:
362 pg
[i
] = num_strict
++;
363 tc_tx_bw
[i
] = MLX4_EN_BW_MAX
;
365 case IEEE_8021QAZ_TSA_ETS
:
366 pg
[i
] = MLX4_EN_TC_ETS
;
367 tc_tx_bw
[i
] = ets
->tc_tx_bw
[i
] ?: MLX4_EN_BW_MIN
;
372 return mlx4_SET_PORT_SCHEDULER(mdev
->dev
, priv
->port
, tc_tx_bw
, pg
,
377 mlx4_en_dcbnl_ieee_setets(struct net_device
*dev
, struct ieee_ets
*ets
)
379 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
380 struct mlx4_en_dev
*mdev
= priv
->mdev
;
383 err
= mlx4_en_ets_validate(priv
, ets
);
387 err
= mlx4_SET_PORT_PRIO2TC(mdev
->dev
, priv
->port
, ets
->prio_tc
);
391 err
= mlx4_en_config_port_scheduler(priv
, ets
, NULL
);
395 memcpy(&priv
->ets
, ets
, sizeof(priv
->ets
));
400 static int mlx4_en_dcbnl_ieee_getpfc(struct net_device
*dev
,
401 struct ieee_pfc
*pfc
)
403 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
405 pfc
->pfc_cap
= IEEE_8021QAZ_MAX_TCS
;
406 pfc
->pfc_en
= priv
->prof
->tx_ppp
;
411 static int mlx4_en_dcbnl_ieee_setpfc(struct net_device
*dev
,
412 struct ieee_pfc
*pfc
)
414 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
415 struct mlx4_en_port_profile
*prof
= priv
->prof
;
416 struct mlx4_en_dev
*mdev
= priv
->mdev
;
417 u32 tx_pause
, tx_ppp
, rx_pause
, rx_ppp
;
420 en_dbg(DRV
, priv
, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
426 rx_pause
= prof
->rx_pause
&& !pfc
->pfc_en
;
427 tx_pause
= prof
->tx_pause
&& !pfc
->pfc_en
;
428 rx_ppp
= pfc
->pfc_en
;
429 tx_ppp
= pfc
->pfc_en
;
431 err
= mlx4_SET_PORT_general(mdev
->dev
, priv
->port
,
432 priv
->rx_skb_size
+ ETH_FCS_LEN
,
433 tx_pause
, tx_ppp
, rx_pause
, rx_ppp
);
435 en_err(priv
, "Failed setting pause params\n");
439 mlx4_en_update_pfc_stats_bitmap(mdev
->dev
, &priv
->stats_bitmap
,
440 rx_ppp
, rx_pause
, tx_ppp
, tx_pause
);
442 prof
->tx_ppp
= tx_ppp
;
443 prof
->rx_ppp
= rx_ppp
;
444 prof
->rx_pause
= rx_pause
;
445 prof
->tx_pause
= tx_pause
;
450 static u8
mlx4_en_dcbnl_getdcbx(struct net_device
*dev
)
452 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
454 return priv
->dcbx_cap
;
457 static u8
mlx4_en_dcbnl_setdcbx(struct net_device
*dev
, u8 mode
)
459 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
460 struct ieee_ets ets
= {0};
461 struct ieee_pfc pfc
= {0};
463 if (mode
== priv
->dcbx_cap
)
466 if ((mode
& DCB_CAP_DCBX_LLD_MANAGED
) ||
467 ((mode
& DCB_CAP_DCBX_VER_IEEE
) &&
468 (mode
& DCB_CAP_DCBX_VER_CEE
)) ||
469 !(mode
& DCB_CAP_DCBX_HOST
))
472 priv
->dcbx_cap
= mode
;
474 ets
.ets_cap
= IEEE_8021QAZ_MAX_TCS
;
475 pfc
.pfc_cap
= IEEE_8021QAZ_MAX_TCS
;
477 if (mode
& DCB_CAP_DCBX_VER_IEEE
) {
478 if (mlx4_en_dcbnl_ieee_setets(dev
, &ets
))
480 if (mlx4_en_dcbnl_ieee_setpfc(dev
, &pfc
))
482 } else if (mode
& DCB_CAP_DCBX_VER_CEE
) {
483 if (mlx4_en_dcbnl_set_all(dev
))
486 if (mlx4_en_dcbnl_ieee_setets(dev
, &ets
))
488 if (mlx4_en_dcbnl_ieee_setpfc(dev
, &pfc
))
490 if (mlx4_en_alloc_tx_queue_per_tc(dev
, 0))
499 #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
500 static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device
*dev
,
501 struct ieee_maxrate
*maxrate
)
503 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
506 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
507 maxrate
->tc_maxrate
[i
] =
508 priv
->maxrate
[i
] * MLX4_RATELIMIT_UNITS_IN_KB
;
513 static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device
*dev
,
514 struct ieee_maxrate
*maxrate
)
516 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
517 u16 tmp
[IEEE_8021QAZ_MAX_TCS
];
520 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
521 /* Convert from Kbps into HW units, rounding result up.
522 * Setting to 0, means unlimited BW.
524 tmp
[i
] = div_u64(maxrate
->tc_maxrate
[i
] +
525 MLX4_RATELIMIT_UNITS_IN_KB
- 1,
526 MLX4_RATELIMIT_UNITS_IN_KB
);
529 err
= mlx4_en_config_port_scheduler(priv
, NULL
, tmp
);
533 memcpy(priv
->maxrate
, tmp
, sizeof(priv
->maxrate
));
538 #define RPG_ENABLE_BIT 31
539 #define CN_TAG_BIT 30
541 static int mlx4_en_dcbnl_ieee_getqcn(struct net_device
*dev
,
542 struct ieee_qcn
*qcn
)
544 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
545 struct mlx4_congestion_control_mb_prio_802_1_qau_params
*hw_qcn
;
546 struct mlx4_cmd_mailbox
*mailbox_out
= NULL
;
547 u64 mailbox_in_dma
= 0;
551 if (!(priv
->mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_QCN
))
554 mailbox_out
= mlx4_alloc_cmd_mailbox(priv
->mdev
->dev
);
555 if (IS_ERR(mailbox_out
))
558 (struct mlx4_congestion_control_mb_prio_802_1_qau_params
*)
561 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
562 inmod
= priv
->port
| ((1 << i
) << 8) |
563 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT
<< 16);
564 err
= mlx4_cmd_box(priv
->mdev
->dev
, mailbox_in_dma
,
566 inmod
, MLX4_CONGESTION_CONTROL_GET_PARAMS
,
567 MLX4_CMD_CONGESTION_CTRL_OPCODE
,
568 MLX4_CMD_TIME_CLASS_C
,
571 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_out
);
576 be32_to_cpu(hw_qcn
->extended_enable
) >> RPG_ENABLE_BIT
;
577 qcn
->rppp_max_rps
[i
] =
578 be32_to_cpu(hw_qcn
->rppp_max_rps
);
579 qcn
->rpg_time_reset
[i
] =
580 be32_to_cpu(hw_qcn
->rpg_time_reset
);
581 qcn
->rpg_byte_reset
[i
] =
582 be32_to_cpu(hw_qcn
->rpg_byte_reset
);
583 qcn
->rpg_threshold
[i
] =
584 be32_to_cpu(hw_qcn
->rpg_threshold
);
585 qcn
->rpg_max_rate
[i
] =
586 be32_to_cpu(hw_qcn
->rpg_max_rate
);
587 qcn
->rpg_ai_rate
[i
] =
588 be32_to_cpu(hw_qcn
->rpg_ai_rate
);
589 qcn
->rpg_hai_rate
[i
] =
590 be32_to_cpu(hw_qcn
->rpg_hai_rate
);
592 be32_to_cpu(hw_qcn
->rpg_gd
);
593 qcn
->rpg_min_dec_fac
[i
] =
594 be32_to_cpu(hw_qcn
->rpg_min_dec_fac
);
595 qcn
->rpg_min_rate
[i
] =
596 be32_to_cpu(hw_qcn
->rpg_min_rate
);
597 qcn
->cndd_state_machine
[i
] =
600 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_out
);
604 static int mlx4_en_dcbnl_ieee_setqcn(struct net_device
*dev
,
605 struct ieee_qcn
*qcn
)
607 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
608 struct mlx4_congestion_control_mb_prio_802_1_qau_params
*hw_qcn
;
609 struct mlx4_cmd_mailbox
*mailbox_in
= NULL
;
610 u64 mailbox_in_dma
= 0;
613 #define MODIFY_ENABLE_HIGH_MASK 0xc0000000
614 #define MODIFY_ENABLE_LOW_MASK 0xffc00000
616 if (!(priv
->mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_QCN
))
619 mailbox_in
= mlx4_alloc_cmd_mailbox(priv
->mdev
->dev
);
620 if (IS_ERR(mailbox_in
))
623 mailbox_in_dma
= mailbox_in
->dma
;
625 (struct mlx4_congestion_control_mb_prio_802_1_qau_params
*)mailbox_in
->buf
;
626 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
627 inmod
= priv
->port
| ((1 << i
) << 8) |
628 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT
<< 16);
630 /* Before updating QCN parameter,
631 * need to set it's modify enable bit to 1
634 hw_qcn
->modify_enable_high
= cpu_to_be32(
635 MODIFY_ENABLE_HIGH_MASK
);
636 hw_qcn
->modify_enable_low
= cpu_to_be32(MODIFY_ENABLE_LOW_MASK
);
638 hw_qcn
->extended_enable
= cpu_to_be32(qcn
->rpg_enable
[i
] << RPG_ENABLE_BIT
);
639 hw_qcn
->rppp_max_rps
= cpu_to_be32(qcn
->rppp_max_rps
[i
]);
640 hw_qcn
->rpg_time_reset
= cpu_to_be32(qcn
->rpg_time_reset
[i
]);
641 hw_qcn
->rpg_byte_reset
= cpu_to_be32(qcn
->rpg_byte_reset
[i
]);
642 hw_qcn
->rpg_threshold
= cpu_to_be32(qcn
->rpg_threshold
[i
]);
643 hw_qcn
->rpg_max_rate
= cpu_to_be32(qcn
->rpg_max_rate
[i
]);
644 hw_qcn
->rpg_ai_rate
= cpu_to_be32(qcn
->rpg_ai_rate
[i
]);
645 hw_qcn
->rpg_hai_rate
= cpu_to_be32(qcn
->rpg_hai_rate
[i
]);
646 hw_qcn
->rpg_gd
= cpu_to_be32(qcn
->rpg_gd
[i
]);
647 hw_qcn
->rpg_min_dec_fac
= cpu_to_be32(qcn
->rpg_min_dec_fac
[i
]);
648 hw_qcn
->rpg_min_rate
= cpu_to_be32(qcn
->rpg_min_rate
[i
]);
649 priv
->cndd_state
[i
] = qcn
->cndd_state_machine
[i
];
650 if (qcn
->cndd_state_machine
[i
] == DCB_CNDD_INTERIOR_READY
)
651 hw_qcn
->extended_enable
|= cpu_to_be32(1 << CN_TAG_BIT
);
653 err
= mlx4_cmd(priv
->mdev
->dev
, mailbox_in_dma
, inmod
,
654 MLX4_CONGESTION_CONTROL_SET_PARAMS
,
655 MLX4_CMD_CONGESTION_CTRL_OPCODE
,
656 MLX4_CMD_TIME_CLASS_C
,
659 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_in
);
663 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_in
);
667 static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device
*dev
,
668 struct ieee_qcn_stats
*qcn_stats
)
670 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
671 struct mlx4_congestion_control_mb_prio_802_1_qau_statistics
*hw_qcn_stats
;
672 struct mlx4_cmd_mailbox
*mailbox_out
= NULL
;
673 u64 mailbox_in_dma
= 0;
677 if (!(priv
->mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_QCN
))
680 mailbox_out
= mlx4_alloc_cmd_mailbox(priv
->mdev
->dev
);
681 if (IS_ERR(mailbox_out
))
685 (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics
*)
688 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
689 inmod
= priv
->port
| ((1 << i
) << 8) |
690 (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT
<< 16);
691 err
= mlx4_cmd_box(priv
->mdev
->dev
, mailbox_in_dma
,
692 mailbox_out
->dma
, inmod
,
693 MLX4_CONGESTION_CONTROL_GET_STATISTICS
,
694 MLX4_CMD_CONGESTION_CTRL_OPCODE
,
695 MLX4_CMD_TIME_CLASS_C
,
698 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_out
);
701 qcn_stats
->rppp_rp_centiseconds
[i
] =
702 be64_to_cpu(hw_qcn_stats
->rppp_rp_centiseconds
);
703 qcn_stats
->rppp_created_rps
[i
] =
704 be32_to_cpu(hw_qcn_stats
->rppp_created_rps
);
706 mlx4_free_cmd_mailbox(priv
->mdev
->dev
, mailbox_out
);
710 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops
= {
711 .ieee_getets
= mlx4_en_dcbnl_ieee_getets
,
712 .ieee_setets
= mlx4_en_dcbnl_ieee_setets
,
713 .ieee_getmaxrate
= mlx4_en_dcbnl_ieee_getmaxrate
,
714 .ieee_setmaxrate
= mlx4_en_dcbnl_ieee_setmaxrate
,
715 .ieee_getqcn
= mlx4_en_dcbnl_ieee_getqcn
,
716 .ieee_setqcn
= mlx4_en_dcbnl_ieee_setqcn
,
717 .ieee_getqcnstats
= mlx4_en_dcbnl_ieee_getqcnstats
,
718 .ieee_getpfc
= mlx4_en_dcbnl_ieee_getpfc
,
719 .ieee_setpfc
= mlx4_en_dcbnl_ieee_setpfc
,
721 .getstate
= mlx4_en_dcbnl_get_state
,
722 .setstate
= mlx4_en_dcbnl_set_state
,
723 .getpfccfg
= mlx4_en_dcbnl_get_pfc_cfg
,
724 .setpfccfg
= mlx4_en_dcbnl_set_pfc_cfg
,
725 .setall
= mlx4_en_dcbnl_set_all
,
726 .getcap
= mlx4_en_dcbnl_getcap
,
727 .getnumtcs
= mlx4_en_dcbnl_getnumtcs
,
728 .getpfcstate
= mlx4_en_dcbnl_getpfcstate
,
729 .setpfcstate
= mlx4_en_dcbnl_setpfcstate
,
730 .getapp
= mlx4_en_dcbnl_getapp
,
731 .setapp
= mlx4_en_dcbnl_setapp
,
733 .getdcbx
= mlx4_en_dcbnl_getdcbx
,
734 .setdcbx
= mlx4_en_dcbnl_setdcbx
,
737 const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops
= {
738 .ieee_getpfc
= mlx4_en_dcbnl_ieee_getpfc
,
739 .ieee_setpfc
= mlx4_en_dcbnl_ieee_setpfc
,
741 .setstate
= mlx4_en_dcbnl_set_state
,
742 .getpfccfg
= mlx4_en_dcbnl_get_pfc_cfg
,
743 .setpfccfg
= mlx4_en_dcbnl_set_pfc_cfg
,
744 .setall
= mlx4_en_dcbnl_set_all
,
745 .getnumtcs
= mlx4_en_dcbnl_getnumtcs
,
746 .getpfcstate
= mlx4_en_dcbnl_getpfcstate
,
747 .setpfcstate
= mlx4_en_dcbnl_setpfcstate
,
748 .getapp
= mlx4_en_dcbnl_getapp
,
749 .setapp
= mlx4_en_dcbnl_setapp
,
751 .getdcbx
= mlx4_en_dcbnl_getdcbx
,
752 .setdcbx
= mlx4_en_dcbnl_setdcbx
,