/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "accel/tls.h"

enum {
        MCQS_IDENTIFIER_BOOT_IMG        = 0x1,
        MCQS_IDENTIFIER_OEM_NVCONFIG    = 0x4,
        MCQS_IDENTIFIER_MLNX_NVCONFIG   = 0x5,
        MCQS_IDENTIFIER_CS_TOKEN        = 0x6,
        MCQS_IDENTIFIER_DBG_TOKEN       = 0x7,
        MCQS_IDENTIFIER_GEARBOX         = 0xA,
};

enum {
        MCQS_UPDATE_STATE_IDLE,
        MCQS_UPDATE_STATE_IN_PROGRESS,
        MCQS_UPDATE_STATE_APPLIED,
        MCQS_UPDATE_STATE_ACTIVE,
        MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
        MCQS_UPDATE_STATE_FAILED,
        MCQS_UPDATE_STATE_CANCELED,
        MCQS_UPDATE_STATE_BUSY,
};

enum {
        MCQI_INFO_TYPE_CAPABILITIES       = 0x0,
        MCQI_INFO_TYPE_VERSION            = 0x1,
        MCQI_INFO_TYPE_ACTIVATION_METHOD  = 0x5,
};

enum {
        MCQI_FW_RUNNING_VERSION = 0,
        MCQI_FW_STORED_VERSION  = 1,
};

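/* QUERY_ADAPTER helpers: the board id (PSID) and the IEEE vendor id are read
 * from the query_adapter_struct returned by the QUERY_ADAPTER command.
 */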
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
        u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
        err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
        if (err)
                goto out;

        memcpy(dev->board_id,
               MLX5_ADDR_OF(query_adapter_out, out,
                            query_adapter_struct.vsd_contd_psid),
               MLX5_FLD_SZ_BYTES(query_adapter_out,
                                 query_adapter_struct.vsd_contd_psid));

out:
        kfree(out);
        return err;
}

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
        int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
        u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
        u32 *out;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
        err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
        if (err)
                goto out;

        *vendor_id = MLX5_GET(query_adapter_out, out,
                              query_adapter_struct.ieee_vendor_id);
out:
        kfree(out);
        return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
        return mlx5_query_pcam_reg(dev, dev->caps.pcam,
                                   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
                                   MLX5_PCAM_REGS_5000_TO_507F);
}

static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
                                          enum mlx5_mcam_reg_groups group)
{
        return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
                                   MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}

static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
        return mlx5_query_qcam_reg(dev, dev->caps.qcam,
                                   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
                                   MLX5_QCAM_REGS_FIRST_128);
}

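/* Query each HCA capability group only when the general caps (queried first)
 * advertise the corresponding feature. Optional groups such as DEBUG and the
 * PCAM/MCAM/QCAM access-register caps are best-effort and do not fail the
 * overall query.
 */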
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
        int err;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                return err;

        if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, atomic)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, roce)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, nic_flow_table) ||
            MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
            MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }

        if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, vector_calc)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, qos)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, debug))
                mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);

        if (MLX5_CAP_GEN(dev, pcam_reg))
                mlx5_get_pcam_reg(dev);

        if (MLX5_CAP_GEN(dev, mcam_reg)) {
                mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
                mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
                mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
        }

        if (MLX5_CAP_GEN(dev, qcam_reg))
                mlx5_get_qcam_reg(dev);

        if (MLX5_CAP_GEN(dev, device_memory)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, event_cap)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
                if (err)
                        return err;
        }

        if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN_64(dev, general_obj_types) &
            MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, ipsec_offload)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
                if (err)
                        return err;
        }

        return 0;
}

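/* INIT_HCA carries an optional sw_owner_id, passed as four u32 words, when
 * the device advertises the sw_owner_id capability.
 */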
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
        u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
        int i;

        MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);

        if (MLX5_CAP_GEN(dev, sw_owner_id)) {
                for (i = 0; i < 4; i++)
                        MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
                                       sw_owner_id[i]);
        }

        return mlx5_cmd_exec_in(dev, init_hca, in);
}

int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        return mlx5_cmd_exec_in(dev, teardown_hca, in);
}

int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
        u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
        int force_state;
        int ret;

        if (!MLX5_CAP_GEN(dev, force_teardown)) {
                mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

        ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
        if (ret)
                return ret;

        force_state = MLX5_GET(teardown_hca_out, out, state);
        if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
                mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
                return -EIO;
        }

        return 0;
}

#define MLX5_FAST_TEARDOWN_WAIT_MS   3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
        unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
        u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
        u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
        int state;
        int ret;

        if (!MLX5_CAP_GEN(dev, fast_teardown)) {
                mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
        MLX5_SET(teardown_hca_in, in, profile,
                 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

        ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
        if (ret)
                return ret;

        state = MLX5_GET(teardown_hca_out, out, state);
        if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
                mlx5_core_warn(dev, "teardown with fast mode failed\n");
                return -EIO;
        }

        mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

        /* Loop until device state turns to disable */
        end = jiffies + msecs_to_jiffies(delay_ms);
        do {
                if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
                        break;

                cond_resched();
        } while (!time_after(jiffies, end));

        if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
                dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
                        mlx5_get_nic_state(dev), delay_ms);
                return -EIO;
        }

        return 0;
}

enum mlxsw_reg_mcc_instruction {
        MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
        MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
        MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
        MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
        MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
        MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

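/* Firmware flashing is driven through access registers: MCC controls the
 * update state machine, MCDA carries the component data blocks, and
 * MCQI/MCQS report component capabilities and status. The helpers below wrap
 * these registers for the shared mlxfw flashing code.
 */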
static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
                            enum mlxsw_reg_mcc_instruction instr,
                            u16 component_index, u32 update_handle,
                            u32 component_size)
{
        u32 out[MLX5_ST_SZ_DW(mcc_reg)];
        u32 in[MLX5_ST_SZ_DW(mcc_reg)];

        memset(in, 0, sizeof(in));

        MLX5_SET(mcc_reg, in, instruction, instr);
        MLX5_SET(mcc_reg, in, component_index, component_index);
        MLX5_SET(mcc_reg, in, update_handle, update_handle);
        MLX5_SET(mcc_reg, in, component_size, component_size);

        return mlx5_core_access_reg(dev, in, sizeof(in), out,
                                    sizeof(out), MLX5_REG_MCC, 0, 1);
}

static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
                              u32 *update_handle, u8 *error_code,
                              u8 *control_state)
{
        u32 out[MLX5_ST_SZ_DW(mcc_reg)];
        u32 in[MLX5_ST_SZ_DW(mcc_reg)];
        int err;

        memset(in, 0, sizeof(in));
        memset(out, 0, sizeof(out));
        MLX5_SET(mcc_reg, in, update_handle, *update_handle);

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_MCC, 0, 0);
        if (err)
                goto out;

        *update_handle = MLX5_GET(mcc_reg, out, update_handle);
        *error_code = MLX5_GET(mcc_reg, out, error_code);
        *control_state = MLX5_GET(mcc_reg, out, control_state);

out:
        return err;
}

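/* MCDA writes are issued as dword chunks; the payload is converted to big
 * endian before being copied into the register data area.
 */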
static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
                             u32 update_handle,
                             u32 offset, u16 size,
                             u8 *data)
{
        int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
        u32 out[MLX5_ST_SZ_DW(mcda_reg)];
        int i, j, dw_size = size >> 2;
        __be32 data_element;
        u32 *in;

        in = kzalloc(in_size, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(mcda_reg, in, update_handle, update_handle);
        MLX5_SET(mcda_reg, in, offset, offset);
        MLX5_SET(mcda_reg, in, size, size);

        for (i = 0; i < dw_size; i++) {
                j = i * 4;
                data_element = htonl(*(u32 *)&data[j]);
                memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
        }

        err = mlx5_core_access_reg(dev, in, in_size, out,
                                   sizeof(out), MLX5_REG_MCDA, 0, 1);
        kfree(in);
        return err;
}

static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
                               u16 component_index, bool read_pending,
                               u8 info_type, u16 data_size, void *mcqi_data)
{
        u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
        u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
        void *data;
        int err;

        MLX5_SET(mcqi_reg, in, component_index, component_index);
        MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
        MLX5_SET(mcqi_reg, in, info_type, info_type);
        MLX5_SET(mcqi_reg, in, data_size, data_size);

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
                                   MLX5_REG_MCQI, 0, 0);
        if (err)
                return err;

        data = MLX5_ADDR_OF(mcqi_reg, out, data);
        memcpy(mcqi_data, data, data_size);

        return 0;
}

static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index,
                                    u32 *max_component_size, u8 *log_mcda_word_size,
                                    u16 *mcda_max_write_size)
{
        u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {};
        int err;

        err = mlx5_reg_mcqi_query(dev, component_index, 0,
                                  MCQI_INFO_TYPE_CAPABILITIES,
                                  MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg);
        if (err)
                return err;

        *max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size);
        *log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size);
        *mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size);

        return 0;
}

struct mlx5_mlxfw_dev {
        struct mlxfw_dev mlxfw_dev;
        struct mlx5_core_dev *mlx5_core_dev;
};

static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
                                u16 component_index, u32 *p_max_size,
                                u8 *p_align_bits, u16 *p_max_write_size)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) {
                mlx5_core_warn(dev, "caps query isn't supported by running FW\n");
                return -EOPNOTSUPP;
        }

        return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size,
                                        p_align_bits, p_max_write_size);
}

static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
        u8 control_state, error_code;
        int err;

        *fwhandle = 0;
        err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
        if (err)
                return err;

        if (control_state != MLXFW_FSM_STATE_IDLE)
                return -EBUSY;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
                                0, *fwhandle, 0);
}

static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                     u16 component_index, u32 component_size)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
                                component_index, fwhandle, component_size);
}

static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                   u8 *data, u16 size, u32 offset)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                     u16 component_index)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
                                component_index, fwhandle, 0);
}

static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
                                fwhandle, 0);
}

static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
                                enum mlxfw_fsm_state *fsm_state,
                                enum mlxfw_fsm_state_err *fsm_state_err)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
        u8 control_state, error_code;
        int err;

        err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
        if (err)
                return err;

        *fsm_state = control_state;
        *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
                               MLXFW_FSM_STATE_ERR_MAX);
        return 0;
}

static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

        mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
                         fwhandle, 0);
}

#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
        struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
                container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
        struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
        u32 out[MLX5_ST_SZ_DW(mirc_reg)];
        u32 in[MLX5_ST_SZ_DW(mirc_reg)];
        int err;

        if (!MLX5_CAP_MCAM_REG2(dev, mirc))
                return -EOPNOTSUPP;

        memset(in, 0, sizeof(in));

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_MIRC, 0, 1);
        if (err)
                return err;

        do {
                memset(out, 0, sizeof(out));
                err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                           sizeof(out), MLX5_REG_MIRC, 0, 0);
                if (err)
                        return err;

                *status = MLX5_GET(mirc_reg, out, status_code);
                if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
                        goto out;

                msleep(20);
        } while (time_before(jiffies, exp_time));

out:
        return err;
}

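/* Glue the register helpers above into the ops table consumed by the shared
 * mlxfw firmware flashing state machine.
 */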
static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
        .component_query        = mlx5_component_query,
        .fsm_lock               = mlx5_fsm_lock,
        .fsm_component_update   = mlx5_fsm_component_update,
        .fsm_block_download     = mlx5_fsm_block_download,
        .fsm_component_verify   = mlx5_fsm_component_verify,
        .fsm_activate           = mlx5_fsm_activate,
        .fsm_reactivate         = mlx5_fsm_reactivate,
        .fsm_query_state        = mlx5_fsm_query_state,
        .fsm_cancel             = mlx5_fsm_cancel,
        .fsm_release            = mlx5_fsm_release
};

int mlx5_firmware_flash(struct mlx5_core_dev *dev,
                        const struct firmware *firmware,
                        struct netlink_ext_ack *extack)
{
        struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
                .mlxfw_dev = {
                        .ops = &mlx5_mlxfw_dev_ops,
                        .psid = dev->board_id,
                        .psid_size = strlen(dev->board_id),
                        .devlink = priv_to_devlink(dev),
                },
                .mlx5_core_dev = dev
        };

        if (!MLX5_CAP_GEN(dev, mcam_reg)  ||
            !MLX5_CAP_MCAM_REG(dev, mcqi) ||
            !MLX5_CAP_MCAM_REG(dev, mcc)  ||
            !MLX5_CAP_MCAM_REG(dev, mcda)) {
                pr_info("%s flashing isn't supported by the running FW\n", __func__);
                return -EOPNOTSUPP;
        }

        return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
                                    firmware, extack);
}

static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev,
                                       u16 component_index, bool read_pending,
                                       u32 *mcqi_version_out)
{
        return mlx5_reg_mcqi_query(dev, component_index, read_pending,
                                   MCQI_INFO_TYPE_VERSION,
                                   MLX5_ST_SZ_BYTES(mcqi_version),
                                   mcqi_version_out);
}

static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out,
                               u16 component_index)
{
        u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg);
        u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {};
        int err;

        memset(out, 0, out_sz);

        MLX5_SET(mcqs_reg, in, component_index, component_index);

        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   out_sz, MLX5_REG_MCQS, 0, 0);
        return err;
}

/* scans component index sequentially, to find the boot img index */
static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
{
        u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
        u16 identifier, component_idx = 0;
        bool quit;
        int err;

        do {
                err = mlx5_reg_mcqs_query(dev, out, component_idx);
                if (err)
                        return err;

                identifier = MLX5_GET(mcqs_reg, out, identifier);
                quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
                quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
        } while (!quit && ++component_idx);

        if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
                mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n",
                               component_idx);
                return -EOPNOTSUPP;
        }

        return component_idx;
}

static int
mlx5_fw_image_pending(struct mlx5_core_dev *dev,
                      int component_index,
                      bool *pending_version_exists)
{
        u32 out[MLX5_ST_SZ_DW(mcqs_reg)];
        u8 component_update_state;
        int err;

        err = mlx5_reg_mcqs_query(dev, out, component_index);
        if (err)
                return err;

        component_update_state = MLX5_GET(mcqs_reg, out, component_update_state);

        if (component_update_state == MCQS_UPDATE_STATE_IDLE) {
                *pending_version_exists = false;
        } else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) {
                *pending_version_exists = true;
        } else {
                mlx5_core_warn(dev,
                               "mcqs: can't read pending fw version while fw state is %d\n",
                               component_update_state);
                return -ENODATA;
        }
        return 0;
}

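/* Report the running boot image version and, when a newly burnt image is
 * pending a reset to take effect, the stored (pending) version as well.
 */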
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
                          u32 *running_ver, u32 *pending_ver)
{
        u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
        bool pending_version_exists;
        int component_index;
        int err;

        if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
            !MLX5_CAP_MCAM_REG(dev, mcqs)) {
                mlx5_core_warn(dev, "fw query isn't supported by the FW\n");
                return -EOPNOTSUPP;
        }

        component_index = mlx5_get_boot_img_component_index(dev);
        if (component_index < 0)
                return component_index;

        err = mlx5_reg_mcqi_version_query(dev, component_index,
                                          MCQI_FW_RUNNING_VERSION,
                                          reg_mcqi_version);
        if (err)
                return err;

        *running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

        err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists);
        if (err)
                return err;

        if (!pending_version_exists) {
                *pending_ver = 0;
                return 0;
        }

        err = mlx5_reg_mcqi_version_query(dev, component_index,
                                          MCQI_FW_STORED_VERSION,
                                          reg_mcqi_version);
        if (err)
                return err;

        *pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

        return 0;
}