/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/random.h>
36 #include <linux/vmalloc.h>
37 #include <linux/hardirq.h>
38 #include <linux/mlx5/driver.h>
39 #include <linux/mlx5/cmd.h>
40 #include "mlx5_core.h"
43 MLX5_HEALTH_POLL_INTERVAL
= 2 * HZ
,
/* Health syndromes reported by firmware in the health buffer;
 * decoded to human-readable strings by hsynd_str().
 */
enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
};
/* NIC interface states read from the init segment by get_nic_state(). */
enum {
	MLX5_NIC_IFC_FULL		= 0,
	MLX5_NIC_IFC_DISABLED		= 1,
	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
	MLX5_NIC_IFC_INVALID		= 3
};
/* Bits in health->flags: when set, new health/recovery work must not be
 * queued (set while the queues are being drained for teardown).
 */
enum {
	MLX5_DROP_NEW_HEALTH_WORK,
	MLX5_DROP_NEW_RECOVERY_WORK,
};
73 static u8
get_nic_state(struct mlx5_core_dev
*dev
)
75 return (ioread32be(&dev
->iseg
->cmdq_addr_l_sz
) >> 8) & 3;
78 static void trigger_cmd_completions(struct mlx5_core_dev
*dev
)
83 /* wait for pending handlers to complete */
84 synchronize_irq(pci_irq_vector(dev
->pdev
, MLX5_EQ_VEC_CMD
));
85 spin_lock_irqsave(&dev
->cmd
.alloc_lock
, flags
);
86 vector
= ~dev
->cmd
.bitmask
& ((1ul << (1 << dev
->cmd
.log_sz
)) - 1);
90 vector
|= MLX5_TRIGGERED_CMD_COMP
;
91 spin_unlock_irqrestore(&dev
->cmd
.alloc_lock
, flags
);
93 mlx5_core_dbg(dev
, "vector 0x%llx\n", vector
);
94 mlx5_cmd_comp_handler(dev
, vector
, true);
98 spin_unlock_irqrestore(&dev
->cmd
.alloc_lock
, flags
);
101 static int in_fatal(struct mlx5_core_dev
*dev
)
103 struct mlx5_core_health
*health
= &dev
->priv
.health
;
104 struct health_buffer __iomem
*h
= health
->health
;
106 if (get_nic_state(dev
) == MLX5_NIC_IFC_DISABLED
)
109 if (ioread32be(&h
->fw_ver
) == 0xffffffff)
115 void mlx5_enter_error_state(struct mlx5_core_dev
*dev
, bool force
)
117 mutex_lock(&dev
->intf_state_mutex
);
118 if (dev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
)
121 mlx5_core_err(dev
, "start\n");
122 if (pci_channel_offline(dev
->pdev
) || in_fatal(dev
) || force
) {
123 dev
->state
= MLX5_DEVICE_STATE_INTERNAL_ERROR
;
124 trigger_cmd_completions(dev
);
127 mlx5_core_event(dev
, MLX5_DEV_EVENT_SYS_ERROR
, 0);
128 mlx5_core_err(dev
, "end\n");
131 mutex_unlock(&dev
->intf_state_mutex
);
134 static void mlx5_handle_bad_state(struct mlx5_core_dev
*dev
)
136 u8 nic_interface
= get_nic_state(dev
);
138 switch (nic_interface
) {
139 case MLX5_NIC_IFC_FULL
:
140 mlx5_core_warn(dev
, "Expected to see disabled NIC but it is full driver\n");
143 case MLX5_NIC_IFC_DISABLED
:
144 mlx5_core_warn(dev
, "starting teardown\n");
147 case MLX5_NIC_IFC_NO_DRAM_NIC
:
148 mlx5_core_warn(dev
, "Expected to see disabled NIC but it is no dram nic\n");
151 mlx5_core_warn(dev
, "Expected to see disabled NIC but it is has invalid value %d\n",
155 mlx5_disable_device(dev
);
158 static void health_recover(struct work_struct
*work
)
160 struct mlx5_core_health
*health
;
161 struct delayed_work
*dwork
;
162 struct mlx5_core_dev
*dev
;
163 struct mlx5_priv
*priv
;
166 dwork
= container_of(work
, struct delayed_work
, work
);
167 health
= container_of(dwork
, struct mlx5_core_health
, recover_work
);
168 priv
= container_of(health
, struct mlx5_priv
, health
);
169 dev
= container_of(priv
, struct mlx5_core_dev
, priv
);
171 nic_state
= get_nic_state(dev
);
172 if (nic_state
== MLX5_NIC_IFC_INVALID
) {
173 dev_err(&dev
->pdev
->dev
, "health recovery flow aborted since the nic state is invalid\n");
177 dev_err(&dev
->pdev
->dev
, "starting health recovery flow\n");
178 mlx5_recover_device(dev
);
181 /* How much time to wait until health resetting the driver (in msecs) */
182 #define MLX5_RECOVERY_DELAY_MSECS 60000
183 static void health_care(struct work_struct
*work
)
185 unsigned long recover_delay
= msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS
);
186 struct mlx5_core_health
*health
;
187 struct mlx5_core_dev
*dev
;
188 struct mlx5_priv
*priv
;
191 health
= container_of(work
, struct mlx5_core_health
, work
);
192 priv
= container_of(health
, struct mlx5_priv
, health
);
193 dev
= container_of(priv
, struct mlx5_core_dev
, priv
);
194 mlx5_core_warn(dev
, "handling bad device here\n");
195 mlx5_handle_bad_state(dev
);
197 spin_lock_irqsave(&health
->wq_lock
, flags
);
198 if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK
, &health
->flags
))
199 schedule_delayed_work(&health
->recover_work
, recover_delay
);
201 dev_err(&dev
->pdev
->dev
,
202 "new health works are not permitted at this stage\n");
203 spin_unlock_irqrestore(&health
->wq_lock
, flags
);
206 static const char *hsynd_str(u8 synd
)
209 case MLX5_HEALTH_SYNDR_FW_ERR
:
210 return "firmware internal error";
211 case MLX5_HEALTH_SYNDR_IRISC_ERR
:
212 return "irisc not responding";
213 case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR
:
214 return "unrecoverable hardware error";
215 case MLX5_HEALTH_SYNDR_CRC_ERR
:
216 return "firmware CRC error";
217 case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR
:
218 return "ICM fetch PCI error";
219 case MLX5_HEALTH_SYNDR_HW_FTL_ERR
:
220 return "HW fatal error\n";
221 case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR
:
222 return "async EQ buffer overrun";
223 case MLX5_HEALTH_SYNDR_EQ_ERR
:
225 case MLX5_HEALTH_SYNDR_EQ_INV
:
226 return "Invalid EQ referenced";
227 case MLX5_HEALTH_SYNDR_FFSER_ERR
:
228 return "FFSER error";
229 case MLX5_HEALTH_SYNDR_HIGH_TEMP
:
230 return "High temperature";
232 return "unrecognized error";
236 static void print_health_info(struct mlx5_core_dev
*dev
)
238 struct mlx5_core_health
*health
= &dev
->priv
.health
;
239 struct health_buffer __iomem
*h
= health
->health
;
244 /* If the syndrome is 0, the device is OK and no need to print buffer */
245 if (!ioread8(&h
->synd
))
248 for (i
= 0; i
< ARRAY_SIZE(h
->assert_var
); i
++)
249 dev_err(&dev
->pdev
->dev
, "assert_var[%d] 0x%08x\n", i
, ioread32be(h
->assert_var
+ i
));
251 dev_err(&dev
->pdev
->dev
, "assert_exit_ptr 0x%08x\n", ioread32be(&h
->assert_exit_ptr
));
252 dev_err(&dev
->pdev
->dev
, "assert_callra 0x%08x\n", ioread32be(&h
->assert_callra
));
253 sprintf(fw_str
, "%d.%d.%d", fw_rev_maj(dev
), fw_rev_min(dev
), fw_rev_sub(dev
));
254 dev_err(&dev
->pdev
->dev
, "fw_ver %s\n", fw_str
);
255 dev_err(&dev
->pdev
->dev
, "hw_id 0x%08x\n", ioread32be(&h
->hw_id
));
256 dev_err(&dev
->pdev
->dev
, "irisc_index %d\n", ioread8(&h
->irisc_index
));
257 dev_err(&dev
->pdev
->dev
, "synd 0x%x: %s\n", ioread8(&h
->synd
), hsynd_str(ioread8(&h
->synd
)));
258 dev_err(&dev
->pdev
->dev
, "ext_synd 0x%04x\n", ioread16be(&h
->ext_synd
));
259 fw
= ioread32be(&h
->fw_ver
);
260 dev_err(&dev
->pdev
->dev
, "raw fw_ver 0x%08x\n", fw
);
263 static unsigned long get_next_poll_jiffies(void)
267 get_random_bytes(&next
, sizeof(next
));
269 next
+= jiffies
+ MLX5_HEALTH_POLL_INTERVAL
;
274 void mlx5_trigger_health_work(struct mlx5_core_dev
*dev
)
276 struct mlx5_core_health
*health
= &dev
->priv
.health
;
279 spin_lock_irqsave(&health
->wq_lock
, flags
);
280 if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK
, &health
->flags
))
281 queue_work(health
->wq
, &health
->work
);
283 dev_err(&dev
->pdev
->dev
,
284 "new health works are not permitted at this stage\n");
285 spin_unlock_irqrestore(&health
->wq_lock
, flags
);
288 static void poll_health(struct timer_list
*t
)
290 struct mlx5_core_dev
*dev
= from_timer(dev
, t
, priv
.health
.timer
);
291 struct mlx5_core_health
*health
= &dev
->priv
.health
;
294 if (dev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
)
297 count
= ioread32be(health
->health_counter
);
298 if (count
== health
->prev
)
299 ++health
->miss_counter
;
301 health
->miss_counter
= 0;
303 health
->prev
= count
;
304 if (health
->miss_counter
== MAX_MISSES
) {
305 dev_err(&dev
->pdev
->dev
, "device's health compromised - reached miss count\n");
306 print_health_info(dev
);
309 if (in_fatal(dev
) && !health
->sick
) {
311 print_health_info(dev
);
312 mlx5_trigger_health_work(dev
);
316 mod_timer(&health
->timer
, get_next_poll_jiffies());
319 void mlx5_start_health_poll(struct mlx5_core_dev
*dev
)
321 struct mlx5_core_health
*health
= &dev
->priv
.health
;
323 timer_setup(&health
->timer
, poll_health
, 0);
325 clear_bit(MLX5_DROP_NEW_HEALTH_WORK
, &health
->flags
);
326 clear_bit(MLX5_DROP_NEW_RECOVERY_WORK
, &health
->flags
);
327 health
->health
= &dev
->iseg
->health
;
328 health
->health_counter
= &dev
->iseg
->health_counter
;
330 health
->timer
.expires
= round_jiffies(jiffies
+ MLX5_HEALTH_POLL_INTERVAL
);
331 add_timer(&health
->timer
);
334 void mlx5_stop_health_poll(struct mlx5_core_dev
*dev
)
336 struct mlx5_core_health
*health
= &dev
->priv
.health
;
338 del_timer_sync(&health
->timer
);
341 void mlx5_drain_health_wq(struct mlx5_core_dev
*dev
)
343 struct mlx5_core_health
*health
= &dev
->priv
.health
;
346 spin_lock_irqsave(&health
->wq_lock
, flags
);
347 set_bit(MLX5_DROP_NEW_HEALTH_WORK
, &health
->flags
);
348 set_bit(MLX5_DROP_NEW_RECOVERY_WORK
, &health
->flags
);
349 spin_unlock_irqrestore(&health
->wq_lock
, flags
);
350 cancel_delayed_work_sync(&health
->recover_work
);
351 cancel_work_sync(&health
->work
);
354 void mlx5_drain_health_recovery(struct mlx5_core_dev
*dev
)
356 struct mlx5_core_health
*health
= &dev
->priv
.health
;
359 spin_lock_irqsave(&health
->wq_lock
, flags
);
360 set_bit(MLX5_DROP_NEW_RECOVERY_WORK
, &health
->flags
);
361 spin_unlock_irqrestore(&health
->wq_lock
, flags
);
362 cancel_delayed_work_sync(&dev
->priv
.health
.recover_work
);
365 void mlx5_health_cleanup(struct mlx5_core_dev
*dev
)
367 struct mlx5_core_health
*health
= &dev
->priv
.health
;
369 destroy_workqueue(health
->wq
);
372 int mlx5_health_init(struct mlx5_core_dev
*dev
)
374 struct mlx5_core_health
*health
;
377 health
= &dev
->priv
.health
;
378 name
= kmalloc(64, GFP_KERNEL
);
382 strcpy(name
, "mlx5_health");
383 strcat(name
, dev_name(&dev
->pdev
->dev
));
384 health
->wq
= create_singlethread_workqueue(name
);
388 spin_lock_init(&health
->wq_lock
);
389 INIT_WORK(&health
->work
, health_care
);
390 INIT_DELAYED_WORK(&health
->recover_work
, health_recover
);