/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
31 #include <asm/eeh_event.h>
32 #include <asm/ppc-pci.h>
33 #include <asm/pci-bridge.h>
38 struct list_head edev_list
;
43 * eeh_pcid_name - Retrieve name of PCI device driver
46 * This routine is used to retrieve the name of PCI device driver
49 static inline const char *eeh_pcid_name(struct pci_dev
*pdev
)
51 if (pdev
&& pdev
->dev
.driver
)
52 return pdev
->dev
.driver
->name
;
57 * eeh_pcid_get - Get the PCI device driver
60 * The function is used to retrieve the PCI device driver for
61 * the indicated PCI device. Besides, we will increase the reference
62 * of the PCI device driver to prevent that being unloaded on
63 * the fly. Otherwise, kernel crash would be seen.
65 static inline struct pci_driver
*eeh_pcid_get(struct pci_dev
*pdev
)
67 if (!pdev
|| !pdev
->driver
)
70 if (!try_module_get(pdev
->driver
->driver
.owner
))
77 * eeh_pcid_put - Dereference on the PCI device driver
80 * The function is called to do dereference on the PCI device
81 * driver of the indicated PCI device.
83 static inline void eeh_pcid_put(struct pci_dev
*pdev
)
85 if (!pdev
|| !pdev
->driver
)
88 module_put(pdev
->driver
->driver
.owner
);
92 * eeh_disable_irq - Disable interrupt for the recovering device
95 * This routine must be called when reporting temporary or permanent
96 * error to the particular PCI device to disable interrupt of that
97 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
98 * do real work because EEH should freeze DMA transfers for those PCI
99 * devices encountering EEH errors, which includes MSI or MSI-X.
101 static void eeh_disable_irq(struct pci_dev
*dev
)
103 struct eeh_dev
*edev
= pci_dev_to_eeh_dev(dev
);
105 /* Don't disable MSI and MSI-X interrupts. They are
106 * effectively disabled by the DMA Stopped state
107 * when an EEH error occurs.
109 if (dev
->msi_enabled
|| dev
->msix_enabled
)
112 if (!irq_has_action(dev
->irq
))
115 edev
->mode
|= EEH_DEV_IRQ_DISABLED
;
116 disable_irq_nosync(dev
->irq
);
120 * eeh_enable_irq - Enable interrupt for the recovering device
123 * This routine must be called to enable interrupt while failed
124 * device could be resumed.
126 static void eeh_enable_irq(struct pci_dev
*dev
)
128 struct eeh_dev
*edev
= pci_dev_to_eeh_dev(dev
);
130 if ((edev
->mode
) & EEH_DEV_IRQ_DISABLED
) {
131 edev
->mode
&= ~EEH_DEV_IRQ_DISABLED
;
135 * This is just ass backwards. This maze has
136 * unbalanced irq_enable/disable calls. So instead of
137 * finding the root cause it works around the warning
138 * in the irq_enable code by conditionally calling
141 * That's just wrong.The warning in the core code is
142 * there to tell people to fix their asymmetries in
143 * their own code, not by abusing the core information
146 * I so wish that the assymetry would be the other way
147 * round and a few more irq_disable calls render that
148 * shit unusable forever.
152 if (irqd_irq_disabled(irq_get_irq_data(dev
->irq
)))
153 enable_irq(dev
->irq
);
157 static bool eeh_dev_removed(struct eeh_dev
*edev
)
159 /* EEH device removed ? */
160 if (!edev
|| (edev
->mode
& EEH_DEV_REMOVED
))
166 static void *eeh_dev_save_state(void *data
, void *userdata
)
168 struct eeh_dev
*edev
= data
;
169 struct pci_dev
*pdev
;
175 * We cannot access the config space on some adapters.
176 * Otherwise, it will cause fenced PHB. We don't save
177 * the content in their config space and will restore
178 * from the initial config space saved when the EEH
181 if (edev
->pe
&& (edev
->pe
->state
& EEH_PE_CFG_RESTRICTED
))
184 pdev
= eeh_dev_to_pci_dev(edev
);
188 pci_save_state(pdev
);
193 * eeh_report_error - Report pci error to each device driver
195 * @userdata: return value
197 * Report an EEH error to each device driver, collect up and
198 * merge the device driver responses. Cumulative response
199 * passed back in "userdata".
201 static void *eeh_report_error(void *data
, void *userdata
)
203 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
204 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
205 enum pci_ers_result rc
, *res
= userdata
;
206 struct pci_driver
*driver
;
208 if (!dev
|| eeh_dev_removed(edev
) || eeh_pe_passed(edev
->pe
))
210 dev
->error_state
= pci_channel_io_frozen
;
212 driver
= eeh_pcid_get(dev
);
213 if (!driver
) return NULL
;
215 eeh_disable_irq(dev
);
217 if (!driver
->err_handler
||
218 !driver
->err_handler
->error_detected
) {
223 rc
= driver
->err_handler
->error_detected(dev
, pci_channel_io_frozen
);
225 /* A driver that needs a reset trumps all others */
226 if (rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
227 if (*res
== PCI_ERS_RESULT_NONE
) *res
= rc
;
229 edev
->in_error
= true;
235 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
237 * @userdata: return value
239 * Tells each device driver that IO ports, MMIO and config space I/O
240 * are now enabled. Collects up and merges the device driver responses.
241 * Cumulative response passed back in "userdata".
243 static void *eeh_report_mmio_enabled(void *data
, void *userdata
)
245 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
246 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
247 enum pci_ers_result rc
, *res
= userdata
;
248 struct pci_driver
*driver
;
250 if (!dev
|| eeh_dev_removed(edev
) || eeh_pe_passed(edev
->pe
))
253 driver
= eeh_pcid_get(dev
);
254 if (!driver
) return NULL
;
256 if (!driver
->err_handler
||
257 !driver
->err_handler
->mmio_enabled
||
258 (edev
->mode
& EEH_DEV_NO_HANDLER
)) {
263 rc
= driver
->err_handler
->mmio_enabled(dev
);
265 /* A driver that needs a reset trumps all others */
266 if (rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
267 if (*res
== PCI_ERS_RESULT_NONE
) *res
= rc
;
274 * eeh_report_reset - Tell device that slot has been reset
276 * @userdata: return value
278 * This routine must be called while EEH tries to reset particular
279 * PCI device so that the associated PCI device driver could take
280 * some actions, usually to save data the driver needs so that the
281 * driver can work again while the device is recovered.
283 static void *eeh_report_reset(void *data
, void *userdata
)
285 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
286 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
287 enum pci_ers_result rc
, *res
= userdata
;
288 struct pci_driver
*driver
;
290 if (!dev
|| eeh_dev_removed(edev
) || eeh_pe_passed(edev
->pe
))
292 dev
->error_state
= pci_channel_io_normal
;
294 driver
= eeh_pcid_get(dev
);
295 if (!driver
) return NULL
;
299 if (!driver
->err_handler
||
300 !driver
->err_handler
->slot_reset
||
301 (edev
->mode
& EEH_DEV_NO_HANDLER
) ||
307 rc
= driver
->err_handler
->slot_reset(dev
);
308 if ((*res
== PCI_ERS_RESULT_NONE
) ||
309 (*res
== PCI_ERS_RESULT_RECOVERED
)) *res
= rc
;
310 if (*res
== PCI_ERS_RESULT_DISCONNECT
&&
311 rc
== PCI_ERS_RESULT_NEED_RESET
) *res
= rc
;
317 static void *eeh_dev_restore_state(void *data
, void *userdata
)
319 struct eeh_dev
*edev
= data
;
320 struct pci_dev
*pdev
;
326 * The content in the config space isn't saved because
327 * the blocked config space on some adapters. We have
328 * to restore the initial saved config space when the
329 * EEH device is created.
331 if (edev
->pe
&& (edev
->pe
->state
& EEH_PE_CFG_RESTRICTED
)) {
332 if (list_is_last(&edev
->list
, &edev
->pe
->edevs
))
333 eeh_pe_restore_bars(edev
->pe
);
338 pdev
= eeh_dev_to_pci_dev(edev
);
342 pci_restore_state(pdev
);
347 * eeh_report_resume - Tell device to resume normal operations
349 * @userdata: return value
351 * This routine must be called to notify the device driver that it
352 * could resume so that the device driver can do some initialization
353 * to make the recovered device work again.
355 static void *eeh_report_resume(void *data
, void *userdata
)
357 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
358 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
360 struct pci_driver
*driver
;
362 if (!dev
|| eeh_dev_removed(edev
) || eeh_pe_passed(edev
->pe
))
364 dev
->error_state
= pci_channel_io_normal
;
366 driver
= eeh_pcid_get(dev
);
367 if (!driver
) return NULL
;
369 was_in_error
= edev
->in_error
;
370 edev
->in_error
= false;
373 if (!driver
->err_handler
||
374 !driver
->err_handler
->resume
||
375 (edev
->mode
& EEH_DEV_NO_HANDLER
) || !was_in_error
) {
376 edev
->mode
&= ~EEH_DEV_NO_HANDLER
;
381 driver
->err_handler
->resume(dev
);
388 * eeh_report_failure - Tell device driver that device is dead.
390 * @userdata: return value
392 * This informs the device driver that the device is permanently
393 * dead, and that no further recovery attempts will be made on it.
395 static void *eeh_report_failure(void *data
, void *userdata
)
397 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
398 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
399 struct pci_driver
*driver
;
401 if (!dev
|| eeh_dev_removed(edev
) || eeh_pe_passed(edev
->pe
))
403 dev
->error_state
= pci_channel_io_perm_failure
;
405 driver
= eeh_pcid_get(dev
);
406 if (!driver
) return NULL
;
408 eeh_disable_irq(dev
);
410 if (!driver
->err_handler
||
411 !driver
->err_handler
->error_detected
) {
416 driver
->err_handler
->error_detected(dev
, pci_channel_io_perm_failure
);
422 static void *eeh_add_virt_device(void *data
, void *userdata
)
424 struct pci_driver
*driver
;
425 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
426 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
427 struct pci_dn
*pdn
= eeh_dev_to_pdn(edev
);
429 if (!(edev
->physfn
)) {
430 pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
431 __func__
, pdn
->phb
->global_number
, pdn
->busno
,
432 PCI_SLOT(pdn
->devfn
), PCI_FUNC(pdn
->devfn
));
436 driver
= eeh_pcid_get(dev
);
439 if (driver
->err_handler
)
443 #ifdef CONFIG_PPC_POWERNV
444 pci_iov_add_virtfn(edev
->physfn
, pdn
->vf_index
);
449 static void *eeh_rmv_device(void *data
, void *userdata
)
451 struct pci_driver
*driver
;
452 struct eeh_dev
*edev
= (struct eeh_dev
*)data
;
453 struct pci_dev
*dev
= eeh_dev_to_pci_dev(edev
);
454 struct eeh_rmv_data
*rmv_data
= (struct eeh_rmv_data
*)userdata
;
455 int *removed
= rmv_data
? &rmv_data
->removed
: NULL
;
458 * Actually, we should remove the PCI bridges as well.
459 * However, that's lots of complexity to do that,
460 * particularly some of devices under the bridge might
461 * support EEH. So we just care about PCI devices for
464 if (!dev
|| (dev
->hdr_type
== PCI_HEADER_TYPE_BRIDGE
))
468 * We rely on count-based pcibios_release_device() to
469 * detach permanently offlined PEs. Unfortunately, that's
470 * not reliable enough. We might have the permanently
471 * offlined PEs attached, but we needn't take care of
472 * them and their child devices.
474 if (eeh_dev_removed(edev
))
477 driver
= eeh_pcid_get(dev
);
481 eeh_pe_passed(edev
->pe
))
484 driver
->err_handler
&&
485 driver
->err_handler
->error_detected
&&
486 driver
->err_handler
->slot_reset
)
490 /* Remove it from PCI subsystem */
491 pr_debug("EEH: Removing %s without EEH sensitive driver\n",
493 edev
->bus
= dev
->bus
;
494 edev
->mode
|= EEH_DEV_DISCONNECTED
;
499 #ifdef CONFIG_PPC_POWERNV
500 struct pci_dn
*pdn
= eeh_dev_to_pdn(edev
);
502 pci_iov_remove_virtfn(edev
->physfn
, pdn
->vf_index
);
506 * We have to set the VF PE number to invalid one, which is
507 * required to plug the VF successfully.
509 pdn
->pe_number
= IODA_INVALID_PE
;
512 list_add(&edev
->rmv_list
, &rmv_data
->edev_list
);
514 pci_lock_rescan_remove();
515 pci_stop_and_remove_bus_device(dev
);
516 pci_unlock_rescan_remove();
522 static void *eeh_pe_detach_dev(void *data
, void *userdata
)
524 struct eeh_pe
*pe
= (struct eeh_pe
*)data
;
525 struct eeh_dev
*edev
, *tmp
;
527 eeh_pe_for_each_dev(pe
, edev
, tmp
) {
528 if (!(edev
->mode
& EEH_DEV_DISCONNECTED
))
531 edev
->mode
&= ~(EEH_DEV_DISCONNECTED
| EEH_DEV_IRQ_DISABLED
);
532 eeh_rmv_from_parent_pe(edev
);
539 * Explicitly clear PE's frozen state for PowerNV where
540 * we have frozen PE until BAR restore is completed. It's
541 * harmless to clear it for pSeries. To be consistent with
542 * PE reset (for 3 times), we try to clear the frozen state
543 * for 3 times as well.
545 static void *__eeh_clear_pe_frozen_state(void *data
, void *flag
)
547 struct eeh_pe
*pe
= (struct eeh_pe
*)data
;
548 bool clear_sw_state
= *(bool *)flag
;
551 for (i
= 0; rc
&& i
< 3; i
++)
552 rc
= eeh_unfreeze_pe(pe
, clear_sw_state
);
554 /* Stop immediately on any errors */
556 pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
557 __func__
, rc
, pe
->phb
->global_number
, pe
->addr
);
564 static int eeh_clear_pe_frozen_state(struct eeh_pe
*pe
,
569 rc
= eeh_pe_traverse(pe
, __eeh_clear_pe_frozen_state
, &clear_sw_state
);
571 eeh_pe_state_clear(pe
, EEH_PE_ISOLATED
);
573 return rc
? -EIO
: 0;
576 int eeh_pe_reset_and_recover(struct eeh_pe
*pe
)
580 /* Bail if the PE is being recovered */
581 if (pe
->state
& EEH_PE_RECOVERING
)
584 /* Put the PE into recovery mode */
585 eeh_pe_state_mark(pe
, EEH_PE_RECOVERING
);
588 eeh_pe_dev_traverse(pe
, eeh_dev_save_state
, NULL
);
591 ret
= eeh_pe_reset_full(pe
);
593 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
597 /* Unfreeze the PE */
598 ret
= eeh_clear_pe_frozen_state(pe
, true);
600 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
604 /* Restore device state */
605 eeh_pe_dev_traverse(pe
, eeh_dev_restore_state
, NULL
);
607 /* Clear recovery mode */
608 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
614 * eeh_reset_device - Perform actual reset of a pci slot
616 * @bus: PCI bus corresponding to the isolcated slot
618 * This routine must be called to do reset on the indicated PE.
619 * During the reset, udev might be invoked because those affected
620 * PCI devices will be removed and then added.
622 static int eeh_reset_device(struct eeh_pe
*pe
, struct pci_bus
*bus
,
623 struct eeh_rmv_data
*rmv_data
)
625 struct pci_bus
*frozen_bus
= eeh_pe_bus_get(pe
);
628 struct eeh_dev
*edev
;
630 /* pcibios will clear the counter; save the value */
631 cnt
= pe
->freeze_count
;
635 * We don't remove the corresponding PE instances because
636 * we need the information afterwords. The attached EEH
637 * devices are expected to be attached soon when calling
638 * into pci_hp_add_devices().
640 eeh_pe_state_mark(pe
, EEH_PE_KEEP
);
642 if (pe
->type
& EEH_PE_VF
) {
643 eeh_pe_dev_traverse(pe
, eeh_rmv_device
, NULL
);
645 pci_lock_rescan_remove();
646 pci_hp_remove_devices(bus
);
647 pci_unlock_rescan_remove();
649 } else if (frozen_bus
) {
650 eeh_pe_dev_traverse(pe
, eeh_rmv_device
, rmv_data
);
654 * Reset the pci controller. (Asserts RST#; resets config space).
655 * Reconfigure bridges and devices. Don't try to bring the system
656 * up if the reset failed for some reason.
658 * During the reset, it's very dangerous to have uncontrolled PCI
659 * config accesses. So we prefer to block them. However, controlled
660 * PCI config accesses initiated from EEH itself are allowed.
662 rc
= eeh_pe_reset_full(pe
);
666 pci_lock_rescan_remove();
669 eeh_ops
->configure_bridge(pe
);
670 eeh_pe_restore_bars(pe
);
672 /* Clear frozen state */
673 rc
= eeh_clear_pe_frozen_state(pe
, false);
675 pci_unlock_rescan_remove();
679 /* Give the system 5 seconds to finish running the user-space
680 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
681 * this is a hack, but if we don't do this, and try to bring
682 * the device up before the scripts have taken it down,
683 * potentially weird things happen.
686 pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
690 * The EEH device is still connected with its parent
691 * PE. We should disconnect it so the binding can be
692 * rebuilt when adding PCI devices.
694 edev
= list_first_entry(&pe
->edevs
, struct eeh_dev
, list
);
695 eeh_pe_traverse(pe
, eeh_pe_detach_dev
, NULL
);
696 if (pe
->type
& EEH_PE_VF
) {
697 eeh_add_virt_device(edev
, NULL
);
699 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
700 pci_hp_add_devices(bus
);
702 } else if (frozen_bus
&& rmv_data
->removed
) {
703 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
706 edev
= list_first_entry(&pe
->edevs
, struct eeh_dev
, list
);
707 eeh_pe_traverse(pe
, eeh_pe_detach_dev
, NULL
);
708 if (pe
->type
& EEH_PE_VF
)
709 eeh_add_virt_device(edev
, NULL
);
711 pci_hp_add_devices(frozen_bus
);
713 eeh_pe_state_clear(pe
, EEH_PE_KEEP
);
716 pe
->freeze_count
= cnt
;
718 pci_unlock_rescan_remove();
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300
728 * eeh_handle_normal_event - Handle EEH events on a specific PE
731 * Attempts to recover the given PE. If recovery fails or the PE has failed
732 * too many times, remove the PE.
734 * Returns true if @pe should no longer be used, else false.
736 static bool eeh_handle_normal_event(struct eeh_pe
*pe
)
738 struct pci_bus
*frozen_bus
;
739 struct eeh_dev
*edev
, *tmp
;
741 enum pci_ers_result result
= PCI_ERS_RESULT_NONE
;
742 struct eeh_rmv_data rmv_data
= {LIST_HEAD_INIT(rmv_data
.edev_list
), 0};
744 frozen_bus
= eeh_pe_bus_get(pe
);
746 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
747 __func__
, pe
->phb
->global_number
, pe
->addr
);
751 eeh_pe_update_time_stamp(pe
);
753 if (pe
->freeze_count
> eeh_max_freezes
) {
754 pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
755 "last hour and has been permanently disabled.\n",
756 pe
->phb
->global_number
, pe
->addr
,
760 pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
763 /* Walk the various device drivers attached to this slot through
764 * a reset sequence, giving each an opportunity to do what it needs
765 * to accomplish the reset. Each child gets a report of the
766 * status ... if any child can't handle the reset, then the entire
767 * slot is dlpar removed and added.
769 * When the PHB is fenced, we have to issue a reset to recover from
770 * the error. Override the result if necessary to have partially
771 * hotplug for this case.
773 pr_info("EEH: Notify device drivers to shutdown\n");
774 eeh_pe_dev_traverse(pe
, eeh_report_error
, &result
);
775 if ((pe
->type
& EEH_PE_PHB
) &&
776 result
!= PCI_ERS_RESULT_NONE
&&
777 result
!= PCI_ERS_RESULT_NEED_RESET
)
778 result
= PCI_ERS_RESULT_NEED_RESET
;
780 /* Get the current PCI slot state. This can take a long time,
781 * sometimes over 300 seconds for certain systems.
783 rc
= eeh_ops
->wait_state(pe
, MAX_WAIT_FOR_RECOVERY
*1000);
784 if (rc
< 0 || rc
== EEH_STATE_NOT_SUPPORT
) {
785 pr_warn("EEH: Permanent failure\n");
789 /* Since rtas may enable MMIO when posting the error log,
790 * don't post the error log until after all dev drivers
791 * have been informed.
793 pr_info("EEH: Collect temporary log\n");
794 eeh_slot_error_detail(pe
, EEH_LOG_TEMP
);
796 /* If all device drivers were EEH-unaware, then shut
797 * down all of the device drivers, and hope they
798 * go down willingly, without panicing the system.
800 if (result
== PCI_ERS_RESULT_NONE
) {
801 pr_info("EEH: Reset with hotplug activity\n");
802 rc
= eeh_reset_device(pe
, frozen_bus
, NULL
);
804 pr_warn("%s: Unable to reset, err=%d\n",
810 /* If all devices reported they can proceed, then re-enable MMIO */
811 if (result
== PCI_ERS_RESULT_CAN_RECOVER
) {
812 pr_info("EEH: Enable I/O for affected devices\n");
813 rc
= eeh_pci_enable(pe
, EEH_OPT_THAW_MMIO
);
818 result
= PCI_ERS_RESULT_NEED_RESET
;
820 pr_info("EEH: Notify device drivers to resume I/O\n");
821 eeh_pe_dev_traverse(pe
, eeh_report_mmio_enabled
, &result
);
825 /* If all devices reported they can proceed, then re-enable DMA */
826 if (result
== PCI_ERS_RESULT_CAN_RECOVER
) {
827 pr_info("EEH: Enabled DMA for affected devices\n");
828 rc
= eeh_pci_enable(pe
, EEH_OPT_THAW_DMA
);
833 result
= PCI_ERS_RESULT_NEED_RESET
;
836 * We didn't do PE reset for the case. The PE
837 * is still in frozen state. Clear it before
840 eeh_pe_state_clear(pe
, EEH_PE_ISOLATED
);
841 result
= PCI_ERS_RESULT_RECOVERED
;
845 /* If any device has a hard failure, then shut off everything. */
846 if (result
== PCI_ERS_RESULT_DISCONNECT
) {
847 pr_warn("EEH: Device driver gave up\n");
851 /* If any device called out for a reset, then reset the slot */
852 if (result
== PCI_ERS_RESULT_NEED_RESET
) {
853 pr_info("EEH: Reset without hotplug activity\n");
854 rc
= eeh_reset_device(pe
, NULL
, &rmv_data
);
856 pr_warn("%s: Cannot reset, err=%d\n",
861 pr_info("EEH: Notify device drivers "
862 "the completion of reset\n");
863 result
= PCI_ERS_RESULT_NONE
;
864 eeh_pe_dev_traverse(pe
, eeh_report_reset
, &result
);
867 /* All devices should claim they have recovered by now. */
868 if ((result
!= PCI_ERS_RESULT_RECOVERED
) &&
869 (result
!= PCI_ERS_RESULT_NONE
)) {
870 pr_warn("EEH: Not recovered\n");
875 * For those hot removed VFs, we should add back them after PF get
876 * recovered properly.
878 list_for_each_entry_safe(edev
, tmp
, &rmv_data
.edev_list
, rmv_list
) {
879 eeh_add_virt_device(edev
, NULL
);
880 list_del(&edev
->rmv_list
);
883 /* Tell all device drivers that they can resume operations */
884 pr_info("EEH: Notify device driver to resume\n");
885 eeh_pe_dev_traverse(pe
, eeh_report_resume
, NULL
);
891 * About 90% of all real-life EEH failures in the field
892 * are due to poorly seated PCI cards. Only 10% or so are
893 * due to actual, failed cards.
895 pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
896 "Please try reseating or replacing it\n",
897 pe
->phb
->global_number
, pe
->addr
);
899 eeh_slot_error_detail(pe
, EEH_LOG_PERM
);
901 /* Notify all devices that they're about to go down. */
902 eeh_pe_dev_traverse(pe
, eeh_report_failure
, NULL
);
904 /* Mark the PE to be removed permanently */
905 eeh_pe_state_mark(pe
, EEH_PE_REMOVED
);
908 * Shut down the device drivers for good. We mark
909 * all removed devices correctly to avoid access
910 * the their PCI config any more.
913 if (pe
->type
& EEH_PE_VF
) {
914 eeh_pe_dev_traverse(pe
, eeh_rmv_device
, NULL
);
915 eeh_pe_dev_mode_mark(pe
, EEH_DEV_REMOVED
);
917 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
918 eeh_pe_dev_mode_mark(pe
, EEH_DEV_REMOVED
);
920 pci_lock_rescan_remove();
921 pci_hp_remove_devices(frozen_bus
);
922 pci_unlock_rescan_remove();
924 /* The passed PE should no longer be used */
932 * eeh_handle_special_event - Handle EEH events without a specific failing PE
934 * Called when an EEH event is detected but can't be narrowed down to a
935 * specific PE. Iterates through possible failures and handles them as
938 static void eeh_handle_special_event(void)
940 struct eeh_pe
*pe
, *phb_pe
;
942 struct pci_controller
*hose
;
948 rc
= eeh_ops
->next_error(&pe
);
951 case EEH_NEXT_ERR_DEAD_IOC
:
952 /* Mark all PHBs in dead state */
953 eeh_serialize_lock(&flags
);
955 /* Purge all events */
956 eeh_remove_event(NULL
, true);
958 list_for_each_entry(hose
, &hose_list
, list_node
) {
959 phb_pe
= eeh_phb_pe_get(hose
);
960 if (!phb_pe
) continue;
962 eeh_pe_state_mark(phb_pe
, EEH_PE_ISOLATED
);
965 eeh_serialize_unlock(flags
);
968 case EEH_NEXT_ERR_FROZEN_PE
:
969 case EEH_NEXT_ERR_FENCED_PHB
:
970 case EEH_NEXT_ERR_DEAD_PHB
:
971 /* Mark the PE in fenced state */
972 eeh_serialize_lock(&flags
);
974 /* Purge all events of the PHB */
975 eeh_remove_event(pe
, true);
977 if (rc
== EEH_NEXT_ERR_DEAD_PHB
)
978 eeh_pe_state_mark(pe
, EEH_PE_ISOLATED
);
980 eeh_pe_state_mark(pe
,
981 EEH_PE_ISOLATED
| EEH_PE_RECOVERING
);
983 eeh_serialize_unlock(flags
);
986 case EEH_NEXT_ERR_NONE
:
989 pr_warn("%s: Invalid value %d from next_error()\n",
995 * For fenced PHB and frozen PE, it's handled as normal
996 * event. We have to remove the affected PHBs for dead
999 if (rc
== EEH_NEXT_ERR_FROZEN_PE
||
1000 rc
== EEH_NEXT_ERR_FENCED_PHB
) {
1002 * eeh_handle_normal_event() can make the PE stale if it
1003 * determines that the PE cannot possibly be recovered.
1004 * Don't modify the PE state if that's the case.
1006 if (eeh_handle_normal_event(pe
))
1009 eeh_pe_state_clear(pe
, EEH_PE_RECOVERING
);
1011 pci_lock_rescan_remove();
1012 list_for_each_entry(hose
, &hose_list
, list_node
) {
1013 phb_pe
= eeh_phb_pe_get(hose
);
1015 !(phb_pe
->state
& EEH_PE_ISOLATED
) ||
1016 (phb_pe
->state
& EEH_PE_RECOVERING
))
1019 /* Notify all devices to be down */
1020 eeh_pe_state_clear(pe
, EEH_PE_PRI_BUS
);
1021 eeh_pe_dev_traverse(pe
,
1022 eeh_report_failure
, NULL
);
1023 bus
= eeh_pe_bus_get(phb_pe
);
1025 pr_err("%s: Cannot find PCI bus for "
1028 pe
->phb
->global_number
,
1032 pci_hp_remove_devices(bus
);
1034 pci_unlock_rescan_remove();
1038 * If we have detected dead IOC, we needn't proceed
1039 * any more since all PHBs would have been removed
1041 if (rc
== EEH_NEXT_ERR_DEAD_IOC
)
1043 } while (rc
!= EEH_NEXT_ERR_NONE
);
/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE, or NULL when no specific failing PE was identified
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}