/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on RTAS, so the platform-dependent
 * EEH operations are implemented as RTAS calls. The functions are derived
 * from arch/powerpc/platforms/pseries/eeh.c and the necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
#ifdef CONFIG_PCI_IOV
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_dn *physfn_pdn;
	struct eeh_dev *edev;

	if (!pdev->is_virtfn)
		return;

	pdn->device_id = pdev->device;
	pdn->vendor_id = pdev->vendor;
	pdn->class_code = pdev->class;
	/*
	 * Last allow unfreeze return code used for retrieval
	 * by user space in eeh-sysfs to show the last command
	 * completion from platform.
	 */
	pdn->last_allow_rc = 0;
	physfn_pdn = pci_get_pdn(pdev->physfn);
	pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
	edev = pdn_to_eeh_dev(pdn);

	/*
	 * The following operations will fail if VF's sysfs files
	 * aren't created or its resources aren't finalized.
	 */
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
	eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */
	eeh_add_to_parent_pe(edev);   /* Add as VF PE type */
	eeh_sysfs_add_device(pdev);
}
#endif
/**
 * Buffer for reporting slot-error-detail rtas calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe = rtas_token("ibm,configure-pe");

	/*
	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
	 * however ibm,configure-pe can be faster. If we can't find
	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since older firmware probably supports
	 * domain/bus/slot/function addresses for EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

#ifdef CONFIG_PCI_IOV
	/* Set EEH machine dependent code */
	ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
#endif

	return 0;
}
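
/**
 * pseries_eeh_cap_start - Locate the standard PCI capability pointer
 * @pdn: PCI device node
 *
 * Read PCI_STATUS through RTAS and return the offset of the standard
 * capability pointer (PCI_CAPABILITY_LIST) if the device advertises a
 * capability list, or 0 otherwise.
 */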
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}
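
/**
 * pseries_eeh_find_cap - Find a standard PCI capability
 * @pdn: PCI device node
 * @cap: capability ID to look for
 *
 * Walk the standard capability list through RTAS config reads and
 * return the config space offset of @cap, or 0 if it isn't found.
 * The walk is bounded to 48 entries to guard against broken lists.
 */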
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}
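
/**
 * pseries_eeh_find_ecap - Find a PCIe extended capability
 * @pdn: PCI device node
 * @cap: extended capability ID to look for
 *
 * Walk the PCIe extended capability list, which starts at config
 * offset 256, and return the offset of @cap, or 0 if the device has
 * no PCIe capability or @cap isn't present.
 */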
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
/**
 * pseries_eeh_probe - EEH probe on the given device
 * @pdn: PCI device node
 * @data: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. The function
 * is introduced for that purpose.
 */
static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || edev->pe)
		return NULL;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/*
	 * Update the class code and mode of the eeh device. We need
	 * to correctly reflect whether the current device is a root
	 * port or a PCIe switch downstream port.
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
				 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
				 PCI_FUNC(pdn->devfn), pe.phb->global_number,
				 pe.addr);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}
/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA.
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n",
		       __func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}
/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are two RTAS function
 * calls dedicated to this purpose: we try the new one first and
 * then fall back to the old one. Besides, you should make sure the
 * config address is figured out from the FDT node before calling
 * the function.
 *
 * Note that a return value of zero means an invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}
/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On the RTAS compliant
 * pseries platform, there is a dedicated RTAS function for this
 * purpose. Note that the PE address might already be available by
 * the time this is called; if so, it is used in preference to the
 * config address. Furthermore, there are two RTAS calls for the
 * purpose: we try the new one first and fall back to the old one
 * if the new one doesn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	switch (rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		if (rets[2]) {
			if (state) *state = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}
/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE.
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}
/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the time
	 * indicated by firmware. The maximal wait time is 5 minutes,
	 * which is taken from the original EEH implementation; the
	 * original implementation also defined the minimal wait time
	 * as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}
/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE
 * through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}
/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));

		if (!ret)
			return ret;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake. For more
		 * on how these delay values work see rtas_busy_delay_time.
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}
/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space of the specified device.
 */
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}
/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device.
 */
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}
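
/**
 * pseries_eeh_restore_config - Re-initialize the config space of a device
 * @pdn: PCI device node
 *
 * Currently only VFs are handled here: their config space is restored
 * through eeh_restore_vf_config(). For other devices this is a no-op.
 */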
static int pseries_eeh_restore_config(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	/*
	 * FIXME: The MPS, error routing rules and timeout setting are
	 * worth exporting by firmware in an extensible way.
	 */
	if (edev->physfn)
		ret = eeh_restore_vf_config(pdn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->pe_config_addr, ret);
		return -EIO;
	}

	return ret;
}
#ifdef CONFIG_PCI_IOV
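/**
 * pseries_send_allow_unfreeze - Allow VF PEs to be unfrozen
 * @pdn: PCI device node of the PF
 * @vf_pe_array: array of VF PE numbers
 * @cur_vfs: number of entries in @vf_pe_array
 *
 * Issue the "ibm,open-sriov-allow-unfreeze" RTAS call, passing the
 * VF PE numbers through rtas_data_buf.
 */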
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
				u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);
	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);
	return rc;
}
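
/*
 * Build the array of VF PE numbers for the device (all VFs of a PF, or
 * the single VF itself) and ask firmware to allow them to be unfrozen.
 * The RTAS return code is recorded in last_allow_rc of the affected
 * pci_dn(s) so it can be retrieved through eeh-sysfs.
 */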
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;
	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);
			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			pdn = pci_get_pdn(edev->pdev);
			vf_pe_array[0] = cpu_to_be16(pdn->pe_number);
			physfn_pdn = pci_get_pdn(edev->physfn);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}
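
/*
 * notify_resume handler: when firmware provides the
 * "ibm,open-sriov-allow-unfreeze" RTAS call, allow the VF PEs
 * associated with the device (PF or VF) to be unfrozen on resume.
 */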
static int pseries_notify_resume(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev)
		return -EEXIST;

	if (rtas_token("ibm,open-sriov-allow-unfreeze")
	    == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
		return pseries_call_allow_unfreeze(edev);

	return 0;
}
#endif
static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= pseries_eeh_restore_config,
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};
/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on the pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);