/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};
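
/*
 * SMPs (LID-routed or directed-route) may only be handed to the device
 * when the port actually has an SMI; MADs of every other management
 * class are always allowed through.
 */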
static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->mdev->port_caps[port_num - 1].has_smi;
}
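
/*
 * Wrapper for the MAD_IFC firmware command. op_modifier bit 0 asks
 * firmware to skip the M_Key check and bit 1 the B_Key check; both are
 * also skipped when no completion (in_wc) is available, since a
 * key-check trap cannot be generated without knowing where to send it.
 */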
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
			int ignore_bkey, u8 port, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh, const void *in_mad,
			void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
				port);
}
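
/*
 * Convert QUERY_VPORT_COUNTER output into the PMA PortCountersExt
 * attribute. Unicast and multicast counts are summed, and the octet
 * counters are shifted right by two because PortXmitData/PortRcvData
 * are specified in units of 32-bit words while firmware counts octets.
 */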
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}
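
/*
 * Fill the classic 32-bit PMA PortCounters attribute from a PPCNT
 * register query; out_pma points at the IB port counters group inside
 * the PPCNT output.
 */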
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters will be reported in
	 * their 64bit form via ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)	{		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
			     port_xmit_wait);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}
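
/*
 * Handle a performance-management GET locally instead of forwarding it
 * to firmware: ClassPortInfo advertises extended-width support,
 * PortCountersExt is served from QUERY_VPORT_COUNTER, and the classic
 * PortCounters attribute is served from the PPCNT register.
 */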
static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_core_dev *mdev;
	bool native_port = true;
	u8 mdev_port_num;
	void *out_cnt;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* Fail to get the native port, likely due to 2nd port is still
		 * unaffiliated. In such case default to 1st port and attached
		 * PF device.
		 */
		native_port = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}
	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		goto done;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
						    out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}
	kvfree(out_cnt);
	err = err ? IB_MAD_RESULT_FAILURE :
		    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
done:
	if (native_port)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}
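
/*
 * Main MAD entry point for the device. Unsupported class/method
 * combinations are consumed without a reply; performance-management
 * GETs are answered from hardware counters when vport counters are
 * supported; everything else goes to firmware through MAD_IFC.
 */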
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	u8 mgmt_class = in->mad_hdr.mgmt_class;
	u8 method = in->mad_hdr.method;
	u16 slid;
	int err;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
		       be16_to_cpu(IB_LID_PERMISSIVE);

	if (method == IB_MGMT_METHOD_TRAP && !slid)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	switch (mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET &&
		    method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} break;
	case IB_MGMT_CLASS_PERF_MGMT:
		if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
		    method == IB_MGMT_METHOD_GET)
			return process_pma_cmd(dev, port_num, in, out);
		/* fallthrough */
	case MLX5_IB_VENDOR_CLASS1:
	case MLX5_IB_VENDOR_CLASS2:
	case IB_MGMT_CLASS_CONG_MGMT: {
		if (method != IB_MGMT_METHOD_GET &&
		    method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} break;
	default:
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
			   in_grh, in, out);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
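
/*
 * Probe for the vendor ExtendedPortInfo attribute; the result is
 * cached in port_caps and later used to detect FDR-10 links.
 */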
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
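
/*
 * Issue a NodeInfo query SMP and return the raw response; the helpers
 * below extract individual NodeInfo fields from it by byte offset.
 */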
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}
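
/* SystemImageGUID occupies bytes 4..11 of the NodeInfo data. */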
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}
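
/* PartitionCap (the P_Key table size) is the 16-bit field at offset 28. */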
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}
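
/*
 * The 24-bit VendorID shares the 32-bit word at offset 36 with
 * LocalPortNum; the mask keeps its low 16 bits.
 */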
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}
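
/* NodeDescription is a plain string; its payload is copied out verbatim. */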
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
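
/* NodeGUID occupies bytes 12..19 of the NodeInfo data. */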
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
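
/*
 * The P_KeyTable is read in blocks of 32 entries; attr_mod selects the
 * block and the index within it selects the returned P_Key.
 */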
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
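
/*
 * Build a GID from two SMP queries: the subnet prefix comes from
 * PortInfo (bytes 8..15) and the GUID from the matching GUIDInfo
 * block of eight GUIDs.
 */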
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
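
/*
 * Populate ib_port_attr from a PortInfo SMP; the byte offsets below
 * follow the IBA PortInfo layout. Extended speeds (FDR/EDR/HDR) and
 * FDR-10 need extra decoding beyond the base LinkSpeedActive field.
 */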
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
		props->port_cap_flags2 =
			be16_to_cpup((__be16 *)(out_mad->data + 60));

		if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
			props->active_width = out_mad->data[31] & 0x1f;
	}

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		case 4:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
				props->active_speed = IB_SPEED_HDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;