/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
enum {
	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
	MAX_MISSES			= 3,
};
enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
};
enum {
	MLX5_DROP_NEW_HEALTH_WORK,
};
enum {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
{
	u32 cur_cmdq_addr_l_sz;

	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
		    state << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}
static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;

	/* Offline PCI reads return 0xffffffff */
	return (ioread32be(&h->fw_ver) == 0xffffffff);
}
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
	u8 synd = ioread8(&h->synd);

	if (rfr && synd)
		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);

	return rfr && synd;
}
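
/* Fatal-error sensors, checked in order of severity: a dead PCI link first,
 * then an offlined PCI channel, then the NIC interface states reported in the
 * initialization segment, and finally a FW syndrome with a reset request.
 * The first sensor that fires determines the reported error.
 */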
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_not_working(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
		return MLX5_SENSOR_NIC_DISABLED;
	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}
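
/* Take or release the SW reset semaphore through the vendor-specific
 * capability (VSC) gateway. Only a PF may own it; -EBUSY from the semaphore
 * space means another PF has already started the reset.
 */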
static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock)
{
	enum mlx5_vsc_state state;
	int ret;

	if (!mlx5_core_is_pf(dev))
		return -EBUSY;

	/* Try to lock GW access, this stage doesn't return
	 * EBUSY because locked GW does not mean that other PF
	 * already started the reset.
	 */
	ret = mlx5_vsc_gw_lock(dev);
	if (ret == -EBUSY)
		return -EINVAL;
	if (ret)
		return ret;

	state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK;
	/* At this stage, if the return status == EBUSY, then we know
	 * for sure that another PF started the reset, so don't allow
	 * another reset.
	 */
	ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state);
	if (ret)
		mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");

	/* Unlock GW access */
	mlx5_vsc_gw_unlock(dev);

	return ret;
}
static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported = (ioread32be(&dev->iseg->initializing) >>
			  MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	u32 fatal_error;

	if (!supported)
		return false;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant 2nd reset. If the fatal erros was
	 * PCI related a reset won't help.
	 */
	fatal_error = mlx5_health_check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.");
		return false;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset, the command
	 * interface address also resides here, don't overwrite it.
	 */
	mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);

	return true;
}
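
/* Move the device to MLX5_DEVICE_STATE_INTERNAL_ERROR so that further FW
 * commands are aborted, then notify listeners with MLX5_DEV_EVENT_SYS_ERROR.
 * The state change is done under intf_state_mutex unless the fatal sensors
 * already fired while the device was up.
 */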
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	bool err_detected = false;

	/* Mark the device as fatal in order to abort FW commands */
	if ((mlx5_health_check_fatal_sensors(dev) || force) &&
	    dev->state == MLX5_DEVICE_STATE_UP) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		err_detected = true;
	}
	mutex_lock(&dev->intf_state_mutex);
	if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;/* a previous error is still being handled */
	if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		goto unlock;
	}

	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
		mlx5_cmd_flush(dev);
	}

	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
unlock:
	mutex_unlock(&dev->intf_state_mutex);
}
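
/* SW reset flow for fatal FW errors: take the cr-dump/reset semaphore, issue
 * the reset if this function is the owner, then wait for the NIC interface to
 * reach MLX5_NIC_IFC_DISABLED (waiting longer when another PF owns the
 * semaphore and is collecting a cr-dump first).
 */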
#define MLX5_CRDUMP_WAIT_MS	60000
#define MLX5_FW_RESET_WAIT_MS	1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
	int lock = -EBUSY;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto unlock;

	mlx5_core_err(dev, "start\n");

	if (mlx5_health_check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
		/* Get cr-dump and reset FW semaphore */
		lock = lock_sem_sw_reset(dev, true);

		if (lock == -EBUSY) {
			delay_ms = MLX5_CRDUMP_WAIT_MS;
			goto recover_from_sw_reset;
		}
		/* Execute SW reset */
		reset_fw_if_needed(dev);
	}

recover_from_sw_reset:
	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		msleep(20);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		lock_sem_sw_reset(dev, false);

	mlx5_core_err(dev, "end\n");

unlock:
	mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_interface = mlx5_get_nic_state(dev);

	switch (nic_interface) {
	case MLX5_NIC_IFC_FULL:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
		break;

	case MLX5_NIC_IFC_DISABLED:
		mlx5_core_warn(dev, "starting teardown\n");
		break;

	case MLX5_NIC_IFC_NO_DRAM_NIC:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
		break;

	case MLX5_NIC_IFC_SW_RESET:
		/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
		 * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
		 *    and this is a VF), this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function, driver can
		 *    be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev, "NIC SW reset in progress\n");
		break;

	default:
		mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
			       nic_interface);
	}

	mlx5_disable_device(dev);
}
/* How much time to wait until health resetting the driver (in msecs) */
#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
	unsigned long end;

	end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
	while (sensor_pci_not_working(dev)) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		msleep(100);
	}
	return 0;
}
static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
{
	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	if (mlx5_health_wait_pci_up(dev)) {
		mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n");
		return -EIO;
	}
	mlx5_core_err(dev, "starting health recovery flow\n");
	mlx5_recover_device(dev);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
	    mlx5_health_check_fatal_sensors(dev)) {
		mlx5_core_err(dev, "health recovery failed\n");
		return -EIO;
	}
	return 0;
}
static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error\n";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	char fw_str[18];
	u32 fw;
	int i;

	/* If the syndrome is 0, the device is OK and no need to print buffer */
	if (!ioread8(&h->synd))
		return;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
			      ioread32be(h->assert_var + i));

	mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
		      ioread32be(&h->assert_exit_ptr));
	mlx5_core_err(dev, "assert_callra 0x%08x\n",
		      ioread32be(&h->assert_callra));
	sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	mlx5_core_err(dev, "fw_ver %s\n", fw_str);
	mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
		      hsynd_str(ioread8(&h->synd)));
	mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
			  struct devlink_fmsg *fmsg,
			  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u8 synd;
	int err;

	synd = ioread8(&h->synd);
	err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	if (err || !synd)
		return err;

	return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
}
struct mlx5_fw_reporter_ctx {
	u8 err_synd;
	int miss_counter;
};
static int
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
			       struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
	int err;

	err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
				       fw_reporter_ctx->err_synd);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
					fw_reporter_ctx->miss_counter);
	if (err)
		return err;
	return 0;
}
static int
mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
				       struct devlink_fmsg *fmsg)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	int err;
	int i;

	if (!ioread8(&h->synd))
		return 0;

	err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_start(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
		err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
					ioread32be(&h->assert_exit_ptr));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
					ioread32be(&h->assert_callra));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
				       ioread8(&h->irisc_index));
	if (err)
		return err;
	err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
					ioread16be(&h->ext_synd));
	if (err)
		return err;
	err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
					ioread32be(&h->fw_ver));
	if (err)
		return err;
	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
		return err;
	return devlink_fmsg_pair_nest_end(fmsg);
}
static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
		      struct devlink_fmsg *fmsg, void *priv_ctx,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	int err;

	err = mlx5_fw_tracer_trigger_core_dump_general(dev);
	if (err)
		return err;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			return err;
	}

	err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
	if (err)
		return err;
	return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}
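
/* report_work handler: feed the FW devlink reporter with either the current
 * health syndrome or the FW miss counter, whichever triggered the report.
 */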
static void mlx5_fw_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;

	health = container_of(work, struct mlx5_core_health, report_work);

	if (IS_ERR_OR_NULL(health->fw_reporter))
		return;

	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	if (fw_reporter_ctx.err_synd) {
		devlink_health_report(health->fw_reporter,
				      "FW syndrom reported", &fw_reporter_ctx);
		return;
	}
	if (fw_reporter_ctx.miss_counter)
		devlink_health_report(health->fw_reporter,
				      "FW miss counter reported",
				      &fw_reporter_ctx);
}
static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
	.name = "fw",
	.diagnose = mlx5_fw_reporter_diagnose,
	.dump = mlx5_fw_reporter_dump,
};
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
			       void *priv_ctx,
			       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);

	return mlx5_health_try_recover(dev);
}
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
	u32 crdump_size = dev->priv.health.crdump_size;
	u32 *cr_data;
	int err;

	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	cr_data = kvmalloc(crdump_size, GFP_KERNEL);
	if (!cr_data)
		return -ENOMEM;
	err = mlx5_crdump_collect(dev, cr_data);
	if (err)
		goto free_data;

	if (priv_ctx) {
		struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;

		err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
		if (err)
			goto free_data;
	}

	err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);

free_data:
	kvfree(cr_data);
	return err;
}
static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
{
	struct mlx5_fw_reporter_ctx fw_reporter_ctx;
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;

	health = container_of(work, struct mlx5_core_health, fatal_report_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_enter_error_state(dev, false);
	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
		if (mlx5_health_try_recover(dev))
			mlx5_core_err(dev, "health recovery failed\n");
		return;
	}
	fw_reporter_ctx.err_synd = health->synd;
	fw_reporter_ctx.miss_counter = health->miss_counter;
	devlink_health_report(health->fw_fatal_reporter,
			      "FW fatal error reported", &fw_reporter_ctx);
}
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
	.name = "fw_fatal",
	.recover = mlx5_fw_fatal_reporter_recover,
	.dump = mlx5_fw_fatal_reporter_dump,
};
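
/* Two devlink health reporters are registered per device: one for FW syndrome
 * and miss-counter reports (diagnose/dump only) and one for fatal errors,
 * which also gets a grace period and a recover callback that drives
 * mlx5_health_try_recover().
 */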
#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct devlink *devlink = priv_to_devlink(dev);

	health->fw_reporter =
		devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
					       0, dev);
	if (IS_ERR(health->fw_reporter))
		mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
			       PTR_ERR(health->fw_reporter));

	health->fw_fatal_reporter =
		devlink_health_reporter_create(devlink,
					       &mlx5_fw_fatal_reporter_ops,
					       MLX5_REPORTER_FW_GRACEFUL_PERIOD,
					       dev);
	if (IS_ERR(health->fw_fatal_reporter))
		mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
			       PTR_ERR(health->fw_fatal_reporter));
}
static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	if (!IS_ERR_OR_NULL(health->fw_reporter))
		devlink_health_reporter_destroy(health->fw_reporter);

	if (!IS_ERR_OR_NULL(health->fw_fatal_reporter))
		devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
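
/* Randomize the next poll time around MLX5_HEALTH_POLL_INTERVAL so that
 * health polling of multiple devices does not synchronize on the same jiffy.
 */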
static unsigned long get_next_poll_jiffies(void)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

	return next;
}
void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->fatal_report_work);
	else
		mlx5_core_err(dev, "new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}
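
/* Health poll timer: checks the fatal sensors, tracks the FW health counter
 * (MAX_MISSES consecutive stalls mark the device health as compromised) and
 * queues report work when the health syndrome changes, then re-arms itself.
 */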
static void poll_health(struct timer_list *t)
{
	struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	u32 fatal_error;
	u8 prev_synd;
	u32 count;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	fatal_error = mlx5_health_check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
		goto out;
	}

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
		queue_work(health->wq, &health->report_work);
	}

	prev_synd = health->synd;
	health->synd = ioread8(&h->synd);
	if (health->synd && health->synd != prev_synd)
		queue_work(health->wq, &health->report_work);

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	timer_setup(&health->timer, poll_health, 0);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
	add_timer(&health->timer);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	if (disable_health) {
		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	del_timer_sync(&health->timer);
}
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_work_sync(&health->report_work);
	cancel_work_sync(&health->fatal_report_work);
}
void mlx5_health_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	flush_workqueue(health->wq);
}
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
	mlx5_fw_reporters_destroy(dev);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char *name;

	mlx5_fw_reporters_create(dev);

	health = &dev->priv.health;
	name = kmalloc(64, GFP_KERNEL);
	if (!name)
		goto out_err;

	strcpy(name, "mlx5_health");
	strcat(name, dev_name(dev->device));
	health->wq = create_singlethread_workqueue(name);
	kfree(name);
	if (!health->wq)
		goto out_err;
	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);

	return 0;

out_err:
	mlx5_fw_reporters_destroy(dev);
	return -ENOMEM;
}