/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/cmd.h>
35 #include <linux/module.h>
36 #include "mlx5_core.h"
37 #include "../../mlxfw/mlxfw.h"
39 static int mlx5_cmd_query_adapter(struct mlx5_core_dev
*dev
, u32
*out
,
42 u32 in
[MLX5_ST_SZ_DW(query_adapter_in
)] = {0};
44 MLX5_SET(query_adapter_in
, in
, opcode
, MLX5_CMD_OP_QUERY_ADAPTER
);
45 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, outlen
);
48 int mlx5_query_board_id(struct mlx5_core_dev
*dev
)
51 int outlen
= MLX5_ST_SZ_BYTES(query_adapter_out
);
54 out
= kzalloc(outlen
, GFP_KERNEL
);
58 err
= mlx5_cmd_query_adapter(dev
, out
, outlen
);
63 MLX5_ADDR_OF(query_adapter_out
, out
,
64 query_adapter_struct
.vsd_contd_psid
),
65 MLX5_FLD_SZ_BYTES(query_adapter_out
,
66 query_adapter_struct
.vsd_contd_psid
));
73 int mlx5_core_query_vendor_id(struct mlx5_core_dev
*mdev
, u32
*vendor_id
)
76 int outlen
= MLX5_ST_SZ_BYTES(query_adapter_out
);
79 out
= kzalloc(outlen
, GFP_KERNEL
);
83 err
= mlx5_cmd_query_adapter(mdev
, out
, outlen
);
87 *vendor_id
= MLX5_GET(query_adapter_out
, out
,
88 query_adapter_struct
.ieee_vendor_id
);
93 EXPORT_SYMBOL(mlx5_core_query_vendor_id
);
95 static int mlx5_get_pcam_reg(struct mlx5_core_dev
*dev
)
97 return mlx5_query_pcam_reg(dev
, dev
->caps
.pcam
,
98 MLX5_PCAM_FEATURE_ENHANCED_FEATURES
,
99 MLX5_PCAM_REGS_5000_TO_507F
);
102 static int mlx5_get_mcam_reg(struct mlx5_core_dev
*dev
)
104 return mlx5_query_mcam_reg(dev
, dev
->caps
.mcam
,
105 MLX5_MCAM_FEATURE_ENHANCED_FEATURES
,
106 MLX5_MCAM_REGS_FIRST_128
);
109 static int mlx5_get_qcam_reg(struct mlx5_core_dev
*dev
)
111 return mlx5_query_qcam_reg(dev
, dev
->caps
.qcam
,
112 MLX5_QCAM_FEATURE_ENHANCED_FEATURES
,
113 MLX5_QCAM_REGS_FIRST_128
);
116 int mlx5_query_hca_caps(struct mlx5_core_dev
*dev
)
120 err
= mlx5_core_get_caps(dev
, MLX5_CAP_GENERAL
);
124 if (MLX5_CAP_GEN(dev
, eth_net_offloads
)) {
125 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ETHERNET_OFFLOADS
);
130 if (MLX5_CAP_GEN(dev
, ipoib_enhanced_offloads
)) {
131 err
= mlx5_core_get_caps(dev
, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS
);
136 if (MLX5_CAP_GEN(dev
, pg
)) {
137 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ODP
);
142 if (MLX5_CAP_GEN(dev
, atomic
)) {
143 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ATOMIC
);
148 if (MLX5_CAP_GEN(dev
, roce
)) {
149 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ROCE
);
154 if (MLX5_CAP_GEN(dev
, nic_flow_table
) ||
155 MLX5_CAP_GEN(dev
, ipoib_enhanced_offloads
)) {
156 err
= mlx5_core_get_caps(dev
, MLX5_CAP_FLOW_TABLE
);
161 if (MLX5_CAP_GEN(dev
, vport_group_manager
) &&
162 MLX5_CAP_GEN(dev
, eswitch_flow_table
)) {
163 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ESWITCH_FLOW_TABLE
);
168 if (MLX5_CAP_GEN(dev
, eswitch_flow_table
)) {
169 err
= mlx5_core_get_caps(dev
, MLX5_CAP_ESWITCH
);
174 if (MLX5_CAP_GEN(dev
, vector_calc
)) {
175 err
= mlx5_core_get_caps(dev
, MLX5_CAP_VECTOR_CALC
);
180 if (MLX5_CAP_GEN(dev
, qos
)) {
181 err
= mlx5_core_get_caps(dev
, MLX5_CAP_QOS
);
186 if (MLX5_CAP_GEN(dev
, pcam_reg
))
187 mlx5_get_pcam_reg(dev
);
189 if (MLX5_CAP_GEN(dev
, mcam_reg
))
190 mlx5_get_mcam_reg(dev
);
192 if (MLX5_CAP_GEN(dev
, qcam_reg
))
193 mlx5_get_qcam_reg(dev
);
198 int mlx5_cmd_init_hca(struct mlx5_core_dev
*dev
, uint32_t *sw_owner_id
)
200 u32 out
[MLX5_ST_SZ_DW(init_hca_out
)] = {0};
201 u32 in
[MLX5_ST_SZ_DW(init_hca_in
)] = {0};
204 MLX5_SET(init_hca_in
, in
, opcode
, MLX5_CMD_OP_INIT_HCA
);
206 if (MLX5_CAP_GEN(dev
, sw_owner_id
)) {
207 for (i
= 0; i
< 4; i
++)
208 MLX5_ARRAY_SET(init_hca_in
, in
, sw_owner_id
, i
,
212 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
215 int mlx5_cmd_teardown_hca(struct mlx5_core_dev
*dev
)
217 u32 out
[MLX5_ST_SZ_DW(teardown_hca_out
)] = {0};
218 u32 in
[MLX5_ST_SZ_DW(teardown_hca_in
)] = {0};
220 MLX5_SET(teardown_hca_in
, in
, opcode
, MLX5_CMD_OP_TEARDOWN_HCA
);
221 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
224 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev
*dev
)
226 u32 out
[MLX5_ST_SZ_DW(teardown_hca_out
)] = {0};
227 u32 in
[MLX5_ST_SZ_DW(teardown_hca_in
)] = {0};
231 if (!MLX5_CAP_GEN(dev
, force_teardown
)) {
232 mlx5_core_dbg(dev
, "force teardown is not supported in the firmware\n");
236 MLX5_SET(teardown_hca_in
, in
, opcode
, MLX5_CMD_OP_TEARDOWN_HCA
);
237 MLX5_SET(teardown_hca_in
, in
, profile
, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE
);
239 ret
= mlx5_cmd_exec_polling(dev
, in
, sizeof(in
), out
, sizeof(out
));
243 force_state
= MLX5_GET(teardown_hca_out
, out
, force_state
);
244 if (force_state
== MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL
) {
245 mlx5_core_err(dev
, "teardown with force mode failed\n");
/* MCC register instruction opcodes driving the firmware-update FSM. */
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};
261 static int mlx5_reg_mcc_set(struct mlx5_core_dev
*dev
,
262 enum mlxsw_reg_mcc_instruction instr
,
263 u16 component_index
, u32 update_handle
,
266 u32 out
[MLX5_ST_SZ_DW(mcc_reg
)];
267 u32 in
[MLX5_ST_SZ_DW(mcc_reg
)];
269 memset(in
, 0, sizeof(in
));
271 MLX5_SET(mcc_reg
, in
, instruction
, instr
);
272 MLX5_SET(mcc_reg
, in
, component_index
, component_index
);
273 MLX5_SET(mcc_reg
, in
, update_handle
, update_handle
);
274 MLX5_SET(mcc_reg
, in
, component_size
, component_size
);
276 return mlx5_core_access_reg(dev
, in
, sizeof(in
), out
,
277 sizeof(out
), MLX5_REG_MCC
, 0, 1);
280 static int mlx5_reg_mcc_query(struct mlx5_core_dev
*dev
,
281 u32
*update_handle
, u8
*error_code
,
284 u32 out
[MLX5_ST_SZ_DW(mcc_reg
)];
285 u32 in
[MLX5_ST_SZ_DW(mcc_reg
)];
288 memset(in
, 0, sizeof(in
));
289 memset(out
, 0, sizeof(out
));
290 MLX5_SET(mcc_reg
, in
, update_handle
, *update_handle
);
292 err
= mlx5_core_access_reg(dev
, in
, sizeof(in
), out
,
293 sizeof(out
), MLX5_REG_MCC
, 0, 0);
297 *update_handle
= MLX5_GET(mcc_reg
, out
, update_handle
);
298 *error_code
= MLX5_GET(mcc_reg
, out
, error_code
);
299 *control_state
= MLX5_GET(mcc_reg
, out
, control_state
);
305 static int mlx5_reg_mcda_set(struct mlx5_core_dev
*dev
,
307 u32 offset
, u16 size
,
310 int err
, in_size
= MLX5_ST_SZ_BYTES(mcda_reg
) + size
;
311 u32 out
[MLX5_ST_SZ_DW(mcda_reg
)];
312 int i
, j
, dw_size
= size
>> 2;
316 in
= kzalloc(in_size
, GFP_KERNEL
);
320 MLX5_SET(mcda_reg
, in
, update_handle
, update_handle
);
321 MLX5_SET(mcda_reg
, in
, offset
, offset
);
322 MLX5_SET(mcda_reg
, in
, size
, size
);
324 for (i
= 0; i
< dw_size
; i
++) {
326 data_element
= htonl(*(u32
*)&data
[j
]);
327 memcpy(MLX5_ADDR_OF(mcda_reg
, in
, data
) + j
, &data_element
, 4);
330 err
= mlx5_core_access_reg(dev
, in
, in_size
, out
,
331 sizeof(out
), MLX5_REG_MCDA
, 0, 1);
336 static int mlx5_reg_mcqi_query(struct mlx5_core_dev
*dev
,
338 u32
*max_component_size
,
339 u8
*log_mcda_word_size
,
340 u16
*mcda_max_write_size
)
342 u32 out
[MLX5_ST_SZ_DW(mcqi_reg
) + MLX5_ST_SZ_DW(mcqi_cap
)];
343 int offset
= MLX5_ST_SZ_DW(mcqi_reg
);
344 u32 in
[MLX5_ST_SZ_DW(mcqi_reg
)];
347 memset(in
, 0, sizeof(in
));
348 memset(out
, 0, sizeof(out
));
350 MLX5_SET(mcqi_reg
, in
, component_index
, component_index
);
351 MLX5_SET(mcqi_reg
, in
, data_size
, MLX5_ST_SZ_BYTES(mcqi_cap
));
353 err
= mlx5_core_access_reg(dev
, in
, sizeof(in
), out
,
354 sizeof(out
), MLX5_REG_MCQI
, 0, 0);
358 *max_component_size
= MLX5_GET(mcqi_cap
, out
+ offset
, max_component_size
);
359 *log_mcda_word_size
= MLX5_GET(mcqi_cap
, out
+ offset
, log_mcda_word_size
);
360 *mcda_max_write_size
= MLX5_GET(mcqi_cap
, out
+ offset
, mcda_max_write_size
);
366 struct mlx5_mlxfw_dev
{
367 struct mlxfw_dev mlxfw_dev
;
368 struct mlx5_core_dev
*mlx5_core_dev
;
371 static int mlx5_component_query(struct mlxfw_dev
*mlxfw_dev
,
372 u16 component_index
, u32
*p_max_size
,
373 u8
*p_align_bits
, u16
*p_max_write_size
)
375 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
376 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
377 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
379 return mlx5_reg_mcqi_query(dev
, component_index
, p_max_size
,
380 p_align_bits
, p_max_write_size
);
383 static int mlx5_fsm_lock(struct mlxfw_dev
*mlxfw_dev
, u32
*fwhandle
)
385 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
386 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
387 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
388 u8 control_state
, error_code
;
392 err
= mlx5_reg_mcc_query(dev
, fwhandle
, &error_code
, &control_state
);
396 if (control_state
!= MLXFW_FSM_STATE_IDLE
)
399 return mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE
,
403 static int mlx5_fsm_component_update(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
404 u16 component_index
, u32 component_size
)
406 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
407 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
408 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
410 return mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT
,
411 component_index
, fwhandle
, component_size
);
414 static int mlx5_fsm_block_download(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
415 u8
*data
, u16 size
, u32 offset
)
417 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
418 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
419 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
421 return mlx5_reg_mcda_set(dev
, fwhandle
, offset
, size
, data
);
424 static int mlx5_fsm_component_verify(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
427 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
428 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
429 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
431 return mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT
,
432 component_index
, fwhandle
, 0);
435 static int mlx5_fsm_activate(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
437 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
438 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
439 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
441 return mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_ACTIVATE
, 0,
445 static int mlx5_fsm_query_state(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
446 enum mlxfw_fsm_state
*fsm_state
,
447 enum mlxfw_fsm_state_err
*fsm_state_err
)
449 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
450 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
451 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
452 u8 control_state
, error_code
;
455 err
= mlx5_reg_mcc_query(dev
, &fwhandle
, &error_code
, &control_state
);
459 *fsm_state
= control_state
;
460 *fsm_state_err
= min_t(enum mlxfw_fsm_state_err
, error_code
,
461 MLXFW_FSM_STATE_ERR_MAX
);
465 static void mlx5_fsm_cancel(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
467 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
468 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
469 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
471 mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_CANCEL
, 0, fwhandle
, 0);
474 static void mlx5_fsm_release(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
476 struct mlx5_mlxfw_dev
*mlx5_mlxfw_dev
=
477 container_of(mlxfw_dev
, struct mlx5_mlxfw_dev
, mlxfw_dev
);
478 struct mlx5_core_dev
*dev
= mlx5_mlxfw_dev
->mlx5_core_dev
;
480 mlx5_reg_mcc_set(dev
, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE
, 0,
484 static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops
= {
485 .component_query
= mlx5_component_query
,
486 .fsm_lock
= mlx5_fsm_lock
,
487 .fsm_component_update
= mlx5_fsm_component_update
,
488 .fsm_block_download
= mlx5_fsm_block_download
,
489 .fsm_component_verify
= mlx5_fsm_component_verify
,
490 .fsm_activate
= mlx5_fsm_activate
,
491 .fsm_query_state
= mlx5_fsm_query_state
,
492 .fsm_cancel
= mlx5_fsm_cancel
,
493 .fsm_release
= mlx5_fsm_release
496 int mlx5_firmware_flash(struct mlx5_core_dev
*dev
,
497 const struct firmware
*firmware
)
499 struct mlx5_mlxfw_dev mlx5_mlxfw_dev
= {
501 .ops
= &mlx5_mlxfw_dev_ops
,
502 .psid
= dev
->board_id
,
503 .psid_size
= strlen(dev
->board_id
),
508 if (!MLX5_CAP_GEN(dev
, mcam_reg
) ||
509 !MLX5_CAP_MCAM_REG(dev
, mcqi
) ||
510 !MLX5_CAP_MCAM_REG(dev
, mcc
) ||
511 !MLX5_CAP_MCAM_REG(dev
, mcda
)) {
512 pr_info("%s flashing isn't supported by the running FW\n", __func__
);
516 return mlxfw_firmware_flash(&mlx5_mlxfw_dev
.mlxfw_dev
, firmware
);