arch/powerpc/kernel/eeh_driver.c
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/prom.h>
#include <asm/rtas.h>
/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * Return the name of the PCI device driver bound to @pdev, or an
 * empty string if no driver is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}
/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * Retrieve the PCI device driver for the indicated device and take
 * a reference on the driver's module so that it cannot be unloaded
 * while recovery is in progress; unloading it on the fly would
 * crash the kernel.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}
/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * Drop the module reference on the PCI device driver that was taken
 * by eeh_pcid_get().
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}
/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when a temporary or permanent error is
 * reported to a PCI device, to disable that device's interrupt. If
 * the device uses MSI or MSI-X, nothing needs to be done here because
 * EEH freezes DMA for devices hit by EEH errors, which effectively
 * stops MSI and MSI-X delivery as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}
/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the device's interrupt
 * when the failed device is resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 *	tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}
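/**
 * eeh_dev_removed - Check whether the EEH device has been removed
 * @edev: EEH device
 *
 * Returns true if the EEH device is missing or has already been
 * marked with EEH_DEV_REMOVED, so that the recovery callbacks can
 * skip it.
 */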
static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}
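/**
 * eeh_dev_save_state - Save the PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Traversal callback that saves the device's config space with
 * pci_save_state() so that it can be restored after the PE has
 * been reset.
 */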
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * We cannot access the config space on some adapters;
	 * doing so would fence the PHB. For those devices we
	 * don't save the config space here and instead restore
	 * from the initial config space saved when the EEH
	 * device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}
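/*
 * The eeh_report_*() callbacks below drive the generic PCI error
 * recovery hooks a driver registers through pci_driver::err_handler.
 * As a rough, illustrative sketch (the "foo_*" names are hypothetical,
 * not part of this file), an EEH-aware driver provides something like:
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= foo_error_detected,
 *		.mmio_enabled	= foo_mmio_enabled,
 *		.slot_reset	= foo_slot_reset,
 *		.resume		= foo_resume,
 *	};
 *
 * The detection and reset hooks return an enum pci_ers_result (e.g.
 * PCI_ERS_RESULT_NEED_RESET), which the traversal callbacks merge
 * into the cumulative result passed back through "userdata".
 */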
/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}
/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
	if (*res == PCI_ERS_RESULT_NONE) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}
/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called when EEH resets a particular PCI
 * device, so that the associated device driver can take whatever
 * actions it needs, usually restoring saved state, to make the
 * device work again once it has been recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;

	eeh_pcid_put(dev);
	return NULL;
}
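/**
 * eeh_dev_restore_state - Restore the saved PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Traversal callback that restores the config space saved by
 * eeh_dev_save_state(), or the initial BARs for devices whose
 * config space is blocked during recovery.
 */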
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * The config space wasn't saved for adapters with
	 * blocked config space. For those we restore the
	 * initial config space saved when the EEH device
	 * was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return NULL;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}
/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * may resume operations, so that it can do whatever re-initialization
 * is needed to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}
/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver) return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}
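/**
 * eeh_rmv_device - Remove a device without an EEH-aware driver
 * @data: EEH device
 * @userdata: counter of removed devices
 *
 * Traversal callback that unplugs a device whose driver provides no
 * error handlers, marks it as disconnected and counts it, so that it
 * can be re-probed by hotplug once the PE has been reset.
 */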
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	int *removed = (int *)userdata;

	/*
	 * Ideally we would remove the PCI bridges as well, but
	 * that adds a lot of complexity, particularly because
	 * some of the devices under a bridge might support EEH
	 * themselves. So for simplicity we only care about PCI
	 * devices here.
	 */
	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might have the permanently
	 * offlined PEs attached, but we needn't take care of
	 * them and their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	(*removed)++;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();

	return NULL;
}
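/**
 * eeh_pe_detach_dev - Detach disconnected EEH devices from their PE
 * @data: EEH PE
 * @userdata: unused
 *
 * Traversal callback that removes devices previously marked as
 * EEH_DEV_DISCONNECTED from their parent PE, so that the binding can
 * be rebuilt when the devices are added back.
 */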
static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}
/*
 * Explicitly clear the PE's frozen state on PowerNV, where the PE is
 * kept frozen until BAR restore is completed. Clearing it is harmless
 * on pSeries. To be consistent with PE reset, which is retried up to
 * 3 times, we try to clear the frozen state up to 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}
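/**
 * eeh_clear_pe_frozen_state - Unfreeze all PEs in the given PE subtree
 * @pe: EEH PE
 * @clear_sw_state: passed to eeh_unfreeze_pe() to also clear software state
 *
 * Returns 0 on success, or -EIO if any PE in the subtree could not be
 * unfrozen. On success the EEH_PE_ISOLATED flag is cleared as well.
 */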
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}
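/**
 * eeh_pe_reset_and_recover - Reset a PE and walk its devices through recovery
 * @pe: EEH PE
 *
 * Saves device state, resets and unfreezes the PE, notifies the device
 * drivers of the reset, restores device state and finally resumes the
 * drivers. Returns 0 on success, otherwise the error reported by the
 * failing step.
 */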
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}
/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because the affected
 * PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc, removed = 0;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pcibios_add_pci_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(bus);
		pci_unlock_rescan_remove();
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_reset_pe(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc)
		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(bus);
	} else if (frozen_bus && removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}
/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300
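/**
 * eeh_handle_normal_event - Recover a single frozen PE
 * @pe: EEH PE
 *
 * Walks the drivers on the frozen PE through the PCI error recovery
 * sequence: report the error, optionally re-enable MMIO and DMA, reset
 * the slot if any driver asks for it, and resume. If the PE has failed
 * too many times, or recovery fails, the devices are removed permanently.
 */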
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 3 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);
	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for the case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;
excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
		pe->phb->global_number, pe->addr,
		pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
		pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid any further
	 * access to their PCI config space.
	 */
	if (frozen_bus) {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(frozen_bus);
		pci_unlock_rescan_remove();
	}
}
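/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event has been detected but cannot be narrowed down
 * to a single PE. Iterates over the errors reported by
 * eeh_ops->next_error(): frozen PEs and fenced PHBs are handled as normal
 * events, while dead PHBs or a dead IOC cause the affected PHBs to be
 * removed.
 */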
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe) continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. For a dead PHB or dead IOC we have to remove
		 * the affected PHBs.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any further since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}
/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE is frozen. DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI
 * adapter firmware) can also cause EEH errors, as can #SERR, #PERR
 * and other miscellaneous PCI-related errors.
 *
 * The recovery process consists of unplugging the device driver
 * (which generates hotplug events to userspace), then issuing a PCI
 * #RST to the device, then reconfiguring the PCI config space for all
 * bridges and devices under this slot, and finally restarting the
 * device drivers (which causes a second set of hotplug events to go
 * out to userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}