/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto an
 *  event list, where the eehd kernel thread can pick it up and
 *  drive recovery.
 */
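/*
 * Illustrative sketch (not part of the original file): the producer /
 * consumer flow implemented below. A platform error-detection path
 * (the handler name and example_find_affected_pe() are hypothetical)
 * queues an event from interrupt context, and the "eehd" kernel thread
 * started by eeh_event_init() later consumes it in process context:
 *
 *	static irqreturn_t example_eeh_irq(int irq, void *data)
 *	{
 *		struct eeh_pe *pe = example_find_affected_pe(data);
 *
 *		eeh_send_failure_event(pe);	// queue event, wake eehd
 *		return IRQ_HANDLED;
 *	}
 *
 * eehd then dequeues the event in eeh_event_handler() and calls
 * eeh_handle_event(pe) to drive recovery.
 */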
static DEFINE_SPINLOCK(eeh_eventlist_lock);
static struct semaphore eeh_eventlist_sem;
LIST_HEAD(eeh_eventlist);
/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy - unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;
	struct eeh_pe *pe;

	while (!kthread_should_stop()) {
		if (down_interruptible(&eeh_eventlist_sem))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a binding PE */
		pe = event->pe;
		if (pe) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			if (pe->type & EEH_PE_PHB)
				pr_info("EEH: Detected error on PHB#%d\n",
					pe->phb->global_number);
			else
				pr_info("EEH: Detected PCI bus error on "
					"PHB#%d-PE#%x\n",
					pe->phb->global_number, pe->addr);
			eeh_handle_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			eeh_handle_event(NULL);
		}

		kfree(event);
	}

	return 0;
}
/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	/* Initialize semaphore */
	sema_init(&eeh_eventlist_sem, 0);

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
		       __func__, ret);
		return ret;
	}

	return 0;
}
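/*
 * Illustrative sketch (not part of the original file): eeh_event_init()
 * is meant to be called exactly once while the EEH core is brought up,
 * before any event can be queued. A caller would typically do:
 *
 *	int ret = eeh_event_init();
 *	if (ret)
 *		return ret;	// no eehd thread, no asynchronous recovery
 *
 * In this kernel the EEH core performs that call once during its own
 * initialization.
 */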
/**
 * eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (by the eehd kernel thread).
 */
int eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* Let the EEH daemon kick in */
	up(&eeh_eventlist_sem);

	return 0;
}
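/*
 * Illustrative sketch (not part of the original file): because the
 * allocation uses GFP_ATOMIC and the list is protected with
 * spin_lock_irqsave(), while up() is safe in interrupt context, this
 * routine works in both interrupt and process context. As
 * eeh_event_handler() above shows, the PE may also be left unbound;
 * a caller that only knows something went badly wrong (e.g. a dead
 * IOC) can queue a PE-less event and let the recovery core sort it
 * out:
 *
 *	eeh_send_failure_event(NULL);	// eehd calls eeh_handle_event(NULL)
 */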
/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Whether to remove events unconditionally
 *
 * On the PowerNV platform, subsequent events may just be part of an
 * earlier one. In that case the later events are duplicates and
 * unnecessary, so they should be removed from the queue.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose PE has already been
	 * isolated are kept on the queue, so that those errors are
	 * not lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
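/*
 * Illustrative sketch (not part of the original file): the three ways
 * the matching above can be used, following the cases in the loop:
 *
 *	eeh_remove_event(NULL, true);		// purge every queued event
 *	eeh_remove_event(phb_pe, false);	// drop events under this PHB,
 *						// keeping already-isolated PEs
 *	eeh_remove_event(pe, false);		// drop events bound to this PE
 *
 * Here phb_pe and pe are hypothetical pointers to a PHB PE and an
 * ordinary device PE respectively.
 */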