/*
 * The file intends to implement the platform dependent EEH operations on pseries.
 * Actually, the pseries platform is built based on RTAS heavily. That means the
 * pseries platform dependent EEH operations will be built on RTAS calls. The functions
 * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
/* RTAS tokens for the EEH related firmware calls, resolved at init time */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
#ifdef CONFIG_PCI_IOV
/*
 * pseries_pcibios_bus_add_device - EEH setup for newly added VF
 * @pdev: newly added PCI device
 *
 * Copies the IDs cached in the VF's pci_dn, looks up the VF's PE number
 * from the physical function's pe_num_map, and (re)attaches the EEH
 * device to its VF PE before creating the sysfs entries.
 */
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_dn *physfn_pdn;
	struct eeh_dev *edev;

	if (!pdev->is_virtfn)
		return;

	pdn->device_id  = pdev->device;
	pdn->vendor_id  = pdev->vendor;
	pdn->class_code = pdev->class;
	/*
	 * Last allow unfreeze return code used for retrieval
	 * by user space in eeh-sysfs to show the last command
	 * completion from platform.
	 */
	pdn->last_allow_rc = 0;
	physfn_pdn     = pci_get_pdn(pdev->physfn);
	pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
	edev = pdn_to_eeh_dev(pdn);

	/*
	 * The following operations will fail if VF's sysfs files
	 * aren't created or its resources aren't finalized.
	 */
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
	eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */
	eeh_add_to_parent_pe(edev);   /* Add as VF PE type */
	eeh_sysfs_add_device(pdev);
}
#endif
96 * Buffer for reporting slot-error-detail rtas calls. Its here
97 * in BSS, and not dynamically alloced, so that it ends up in
98 * RMO where RTAS can access it.
100 static unsigned char slot_errbuf
[RTAS_ERROR_LOG_MAX
];
101 static DEFINE_SPINLOCK(slot_errbuf_lock
);
102 static int eeh_error_buf_size
;
105 * pseries_eeh_init - EEH platform dependent initialization
107 * EEH platform dependent initialization on pseries.
109 static int pseries_eeh_init(void)
111 /* figure out EEH RTAS function call tokens */
112 ibm_set_eeh_option
= rtas_token("ibm,set-eeh-option");
113 ibm_set_slot_reset
= rtas_token("ibm,set-slot-reset");
114 ibm_read_slot_reset_state2
= rtas_token("ibm,read-slot-reset-state2");
115 ibm_read_slot_reset_state
= rtas_token("ibm,read-slot-reset-state");
116 ibm_slot_error_detail
= rtas_token("ibm,slot-error-detail");
117 ibm_get_config_addr_info2
= rtas_token("ibm,get-config-addr-info2");
118 ibm_get_config_addr_info
= rtas_token("ibm,get-config-addr-info");
119 ibm_configure_pe
= rtas_token("ibm,configure-pe");
122 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
123 * however ibm,configure-pe can be faster. If we can't find
124 * ibm,configure-pe then fall back to using ibm,configure-bridge.
126 if (ibm_configure_pe
== RTAS_UNKNOWN_SERVICE
)
127 ibm_configure_pe
= rtas_token("ibm,configure-bridge");
130 * Necessary sanity check. We needn't check "get-config-addr-info"
131 * and its variant since the old firmware probably support address
132 * of domain/bus/slot/function for EEH RTAS operations.
134 if (ibm_set_eeh_option
== RTAS_UNKNOWN_SERVICE
||
135 ibm_set_slot_reset
== RTAS_UNKNOWN_SERVICE
||
136 (ibm_read_slot_reset_state2
== RTAS_UNKNOWN_SERVICE
&&
137 ibm_read_slot_reset_state
== RTAS_UNKNOWN_SERVICE
) ||
138 ibm_slot_error_detail
== RTAS_UNKNOWN_SERVICE
||
139 ibm_configure_pe
== RTAS_UNKNOWN_SERVICE
) {
140 pr_info("EEH functionality not supported\n");
144 /* Initialize error log lock and size */
145 spin_lock_init(&slot_errbuf_lock
);
146 eeh_error_buf_size
= rtas_token("rtas-error-log-max");
147 if (eeh_error_buf_size
== RTAS_UNKNOWN_SERVICE
) {
148 pr_info("%s: unknown EEH error log size\n",
150 eeh_error_buf_size
= 1024;
151 } else if (eeh_error_buf_size
> RTAS_ERROR_LOG_MAX
) {
152 pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
153 __func__
, eeh_error_buf_size
, RTAS_ERROR_LOG_MAX
);
154 eeh_error_buf_size
= RTAS_ERROR_LOG_MAX
;
157 /* Set EEH probe mode */
158 eeh_add_flag(EEH_PROBE_MODE_DEVTREE
| EEH_ENABLE_IO_FOR_LOG
);
160 #ifdef CONFIG_PCI_IOV
161 /* Set EEH machine dependent code */
162 ppc_md
.pcibios_bus_add_device
= pseries_pcibios_bus_add_device
;
168 static int pseries_eeh_cap_start(struct pci_dn
*pdn
)
175 rtas_read_config(pdn
, PCI_STATUS
, 2, &status
);
176 if (!(status
& PCI_STATUS_CAP_LIST
))
179 return PCI_CAPABILITY_LIST
;
183 static int pseries_eeh_find_cap(struct pci_dn
*pdn
, int cap
)
185 int pos
= pseries_eeh_cap_start(pdn
);
186 int cnt
= 48; /* Maximal number of capabilities */
193 rtas_read_config(pdn
, pos
, 1, &pos
);
197 rtas_read_config(pdn
, pos
+ PCI_CAP_LIST_ID
, 1, &id
);
202 pos
+= PCI_CAP_LIST_NEXT
;
208 static int pseries_eeh_find_ecap(struct pci_dn
*pdn
, int cap
)
210 struct eeh_dev
*edev
= pdn_to_eeh_dev(pdn
);
213 int ttl
= (4096 - 256) / 8;
215 if (!edev
|| !edev
->pcie_cap
)
217 if (rtas_read_config(pdn
, pos
, 4, &header
) != PCIBIOS_SUCCESSFUL
)
223 if (PCI_EXT_CAP_ID(header
) == cap
&& pos
)
226 pos
= PCI_EXT_CAP_NEXT(header
);
230 if (rtas_read_config(pdn
, pos
, 4, &header
) != PCIBIOS_SUCCESSFUL
)
238 * pseries_eeh_probe - EEH probe on the given device
239 * @pdn: PCI device node
242 * When EEH module is installed during system boot, all PCI devices
243 * are checked one by one to see if it supports EEH. The function
244 * is introduced for the purpose.
246 static void *pseries_eeh_probe(struct pci_dn
*pdn
, void *data
)
248 struct eeh_dev
*edev
;
254 /* Retrieve OF node and eeh device */
255 edev
= pdn_to_eeh_dev(pdn
);
256 if (!edev
|| edev
->pe
)
259 /* Check class/vendor/device IDs */
260 if (!pdn
->vendor_id
|| !pdn
->device_id
|| !pdn
->class_code
)
263 /* Skip for PCI-ISA bridge */
264 if ((pdn
->class_code
>> 8) == PCI_CLASS_BRIDGE_ISA
)
268 * Update class code and mode of eeh device. We need
269 * correctly reflects that current device is root port
270 * or PCIe switch downstream port.
272 edev
->class_code
= pdn
->class_code
;
273 edev
->pcix_cap
= pseries_eeh_find_cap(pdn
, PCI_CAP_ID_PCIX
);
274 edev
->pcie_cap
= pseries_eeh_find_cap(pdn
, PCI_CAP_ID_EXP
);
275 edev
->aer_cap
= pseries_eeh_find_ecap(pdn
, PCI_EXT_CAP_ID_ERR
);
276 edev
->mode
&= 0xFFFFFF00;
277 if ((edev
->class_code
>> 8) == PCI_CLASS_BRIDGE_PCI
) {
278 edev
->mode
|= EEH_DEV_BRIDGE
;
279 if (edev
->pcie_cap
) {
280 rtas_read_config(pdn
, edev
->pcie_cap
+ PCI_EXP_FLAGS
,
282 pcie_flags
= (pcie_flags
& PCI_EXP_FLAGS_TYPE
) >> 4;
283 if (pcie_flags
== PCI_EXP_TYPE_ROOT_PORT
)
284 edev
->mode
|= EEH_DEV_ROOT_PORT
;
285 else if (pcie_flags
== PCI_EXP_TYPE_DOWNSTREAM
)
286 edev
->mode
|= EEH_DEV_DS_PORT
;
290 /* Initialize the fake PE */
291 memset(&pe
, 0, sizeof(struct eeh_pe
));
293 pe
.config_addr
= (pdn
->busno
<< 16) | (pdn
->devfn
<< 8);
295 /* Enable EEH on the device */
296 ret
= eeh_ops
->set_option(&pe
, EEH_OPT_ENABLE
);
298 /* Retrieve PE address */
299 edev
->pe_config_addr
= eeh_ops
->get_pe_addr(&pe
);
300 pe
.addr
= edev
->pe_config_addr
;
302 /* Some older systems (Power4) allow the ibm,set-eeh-option
303 * call to succeed even on nodes where EEH is not supported.
304 * Verify support explicitly.
306 ret
= eeh_ops
->get_state(&pe
, NULL
);
307 if (ret
> 0 && ret
!= EEH_STATE_NOT_SUPPORT
)
311 eeh_add_flag(EEH_ENABLED
);
312 eeh_add_to_parent_pe(edev
);
314 pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
315 __func__
, pdn
->busno
, PCI_SLOT(pdn
->devfn
),
316 PCI_FUNC(pdn
->devfn
), pe
.phb
->global_number
,
318 } else if (pdn
->parent
&& pdn_to_eeh_dev(pdn
->parent
) &&
319 (pdn_to_eeh_dev(pdn
->parent
))->pe
) {
320 /* This device doesn't support EEH, but it may have an
321 * EEH parent, in which case we mark it as supported.
323 edev
->pe_config_addr
= pdn_to_eeh_dev(pdn
->parent
)->pe_config_addr
;
324 eeh_add_to_parent_pe(edev
);
328 /* Save memory bars */
335 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
337 * @option: operation to be issued
339 * The function is used to control the EEH functionality globally.
340 * Currently, following options are support according to PAPR:
341 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
343 static int pseries_eeh_set_option(struct eeh_pe
*pe
, int option
)
349 * When we're enabling or disabling EEH functioality on
350 * the particular PE, the PE config address is possibly
351 * unavailable. Therefore, we have to figure it out from
355 case EEH_OPT_DISABLE
:
357 case EEH_OPT_THAW_MMIO
:
358 case EEH_OPT_THAW_DMA
:
359 config_addr
= pe
->config_addr
;
361 config_addr
= pe
->addr
;
363 case EEH_OPT_FREEZE_PE
:
367 pr_err("%s: Invalid option %d\n",
372 ret
= rtas_call(ibm_set_eeh_option
, 4, 1, NULL
,
373 config_addr
, BUID_HI(pe
->phb
->buid
),
374 BUID_LO(pe
->phb
->buid
), option
);
380 * pseries_eeh_get_pe_addr - Retrieve PE address
383 * Retrieve the assocated PE address. Actually, there're 2 RTAS
384 * function calls dedicated for the purpose. We need implement
385 * it through the new function and then the old one. Besides,
386 * you should make sure the config address is figured out from
387 * FDT node before calling the function.
389 * It's notable that zero'ed return value means invalid PE config
392 static int pseries_eeh_get_pe_addr(struct eeh_pe
*pe
)
397 if (ibm_get_config_addr_info2
!= RTAS_UNKNOWN_SERVICE
) {
399 * First of all, we need to make sure there has one PE
400 * associated with the device. Otherwise, PE address is
403 ret
= rtas_call(ibm_get_config_addr_info2
, 4, 2, rets
,
404 pe
->config_addr
, BUID_HI(pe
->phb
->buid
),
405 BUID_LO(pe
->phb
->buid
), 1);
406 if (ret
|| (rets
[0] == 0))
409 /* Retrieve the associated PE config address */
410 ret
= rtas_call(ibm_get_config_addr_info2
, 4, 2, rets
,
411 pe
->config_addr
, BUID_HI(pe
->phb
->buid
),
412 BUID_LO(pe
->phb
->buid
), 0);
414 pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
415 __func__
, pe
->phb
->global_number
, pe
->config_addr
);
422 if (ibm_get_config_addr_info
!= RTAS_UNKNOWN_SERVICE
) {
423 ret
= rtas_call(ibm_get_config_addr_info
, 4, 2, rets
,
424 pe
->config_addr
, BUID_HI(pe
->phb
->buid
),
425 BUID_LO(pe
->phb
->buid
), 0);
427 pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
428 __func__
, pe
->phb
->global_number
, pe
->config_addr
);
439 * pseries_eeh_get_state - Retrieve PE state
441 * @state: return value
443 * Retrieve the state of the specified PE. On RTAS compliant
444 * pseries platform, there already has one dedicated RTAS function
445 * for the purpose. It's notable that the associated PE config address
446 * might be ready when calling the function. Therefore, endeavour to
447 * use the PE config address if possible. Further more, there're 2
448 * RTAS calls for the purpose, we need to try the new one and back
449 * to the old one if the new one couldn't work properly.
451 static int pseries_eeh_get_state(struct eeh_pe
*pe
, int *state
)
458 /* Figure out PE config address if possible */
459 config_addr
= pe
->config_addr
;
461 config_addr
= pe
->addr
;
463 if (ibm_read_slot_reset_state2
!= RTAS_UNKNOWN_SERVICE
) {
464 ret
= rtas_call(ibm_read_slot_reset_state2
, 3, 4, rets
,
465 config_addr
, BUID_HI(pe
->phb
->buid
),
466 BUID_LO(pe
->phb
->buid
));
467 } else if (ibm_read_slot_reset_state
!= RTAS_UNKNOWN_SERVICE
) {
468 /* Fake PE unavailable info */
470 ret
= rtas_call(ibm_read_slot_reset_state
, 3, 3, rets
,
471 config_addr
, BUID_HI(pe
->phb
->buid
),
472 BUID_LO(pe
->phb
->buid
));
474 return EEH_STATE_NOT_SUPPORT
;
480 /* Parse the result out */
482 return EEH_STATE_NOT_SUPPORT
;
486 result
= EEH_STATE_MMIO_ACTIVE
|
487 EEH_STATE_DMA_ACTIVE
;
490 result
= EEH_STATE_RESET_ACTIVE
|
491 EEH_STATE_MMIO_ACTIVE
|
492 EEH_STATE_DMA_ACTIVE
;
498 result
= EEH_STATE_MMIO_ENABLED
;
502 if (state
) *state
= rets
[2];
503 result
= EEH_STATE_UNAVAILABLE
;
505 result
= EEH_STATE_NOT_SUPPORT
;
509 result
= EEH_STATE_NOT_SUPPORT
;
516 * pseries_eeh_reset - Reset the specified PE
518 * @option: reset option
520 * Reset the specified PE
522 static int pseries_eeh_reset(struct eeh_pe
*pe
, int option
)
527 /* Figure out PE address */
528 config_addr
= pe
->config_addr
;
530 config_addr
= pe
->addr
;
532 /* Reset PE through RTAS call */
533 ret
= rtas_call(ibm_set_slot_reset
, 4, 1, NULL
,
534 config_addr
, BUID_HI(pe
->phb
->buid
),
535 BUID_LO(pe
->phb
->buid
), option
);
537 /* If fundamental-reset not supported, try hot-reset */
538 if (option
== EEH_RESET_FUNDAMENTAL
&&
540 option
= EEH_RESET_HOT
;
541 ret
= rtas_call(ibm_set_slot_reset
, 4, 1, NULL
,
542 config_addr
, BUID_HI(pe
->phb
->buid
),
543 BUID_LO(pe
->phb
->buid
), option
);
546 /* We need reset hold or settlement delay */
547 if (option
== EEH_RESET_FUNDAMENTAL
||
548 option
== EEH_RESET_HOT
)
549 msleep(EEH_PE_RST_HOLD_TIME
);
551 msleep(EEH_PE_RST_SETTLE_TIME
);
557 * pseries_eeh_wait_state - Wait for PE state
559 * @max_wait: maximal period in millisecond
561 * Wait for the state of associated PE. It might take some time
562 * to retrieve the PE's state.
564 static int pseries_eeh_wait_state(struct eeh_pe
*pe
, int max_wait
)
570 * According to PAPR, the state of PE might be temporarily
571 * unavailable. Under the circumstance, we have to wait
572 * for indicated time determined by firmware. The maximal
573 * wait time is 5 minutes, which is acquired from the original
574 * EEH implementation. Also, the original implementation
575 * also defined the minimal wait time as 1 second.
577 #define EEH_STATE_MIN_WAIT_TIME (1000)
578 #define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
581 ret
= pseries_eeh_get_state(pe
, &mwait
);
584 * If the PE's state is temporarily unavailable,
585 * we have to wait for the specified time. Otherwise,
586 * the PE's state will be returned immediately.
588 if (ret
!= EEH_STATE_UNAVAILABLE
)
592 pr_warn("%s: Timeout when getting PE's state (%d)\n",
594 return EEH_STATE_NOT_SUPPORT
;
598 pr_warn("%s: Firmware returned bad wait value %d\n",
600 mwait
= EEH_STATE_MIN_WAIT_TIME
;
601 } else if (mwait
> EEH_STATE_MAX_WAIT_TIME
) {
602 pr_warn("%s: Firmware returned too long wait value %d\n",
604 mwait
= EEH_STATE_MAX_WAIT_TIME
;
611 return EEH_STATE_NOT_SUPPORT
;
615 * pseries_eeh_get_log - Retrieve error log
617 * @severity: temporary or permanent error log
618 * @drv_log: driver log to be combined with retrieved error log
619 * @len: length of driver log
621 * Retrieve the temporary or permanent error from the PE.
622 * Actually, the error will be retrieved through the dedicated
625 static int pseries_eeh_get_log(struct eeh_pe
*pe
, int severity
, char *drv_log
, unsigned long len
)
631 spin_lock_irqsave(&slot_errbuf_lock
, flags
);
632 memset(slot_errbuf
, 0, eeh_error_buf_size
);
634 /* Figure out the PE address */
635 config_addr
= pe
->config_addr
;
637 config_addr
= pe
->addr
;
639 ret
= rtas_call(ibm_slot_error_detail
, 8, 1, NULL
, config_addr
,
640 BUID_HI(pe
->phb
->buid
), BUID_LO(pe
->phb
->buid
),
641 virt_to_phys(drv_log
), len
,
642 virt_to_phys(slot_errbuf
), eeh_error_buf_size
,
645 log_error(slot_errbuf
, ERR_TYPE_RTAS_LOG
, 0);
646 spin_unlock_irqrestore(&slot_errbuf_lock
, flags
);
652 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
655 * The function will be called to reconfigure the bridges included
656 * in the specified PE so that the mulfunctional PE would be recovered
659 static int pseries_eeh_configure_bridge(struct eeh_pe
*pe
)
663 /* Waiting 0.2s maximum before skipping configuration */
666 /* Figure out the PE address */
667 config_addr
= pe
->config_addr
;
669 config_addr
= pe
->addr
;
671 while (max_wait
> 0) {
672 ret
= rtas_call(ibm_configure_pe
, 3, 1, NULL
,
673 config_addr
, BUID_HI(pe
->phb
->buid
),
674 BUID_LO(pe
->phb
->buid
));
680 * If RTAS returns a delay value that's above 100ms, cut it
681 * down to 100ms in case firmware made a mistake. For more
682 * on how these delay values work see rtas_busy_delay_time
684 if (ret
> RTAS_EXTENDED_DELAY_MIN
+2 &&
685 ret
<= RTAS_EXTENDED_DELAY_MAX
)
686 ret
= RTAS_EXTENDED_DELAY_MIN
+2;
688 max_wait
-= rtas_busy_delay_time(ret
);
693 rtas_busy_delay(ret
);
696 pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
697 __func__
, pe
->phb
->global_number
, pe
->addr
, ret
);
702 * pseries_eeh_read_config - Read PCI config space
703 * @pdn: PCI device node
704 * @where: PCI address
705 * @size: size to read
708 * Read config space from the speicifed device
710 static int pseries_eeh_read_config(struct pci_dn
*pdn
, int where
, int size
, u32
*val
)
712 return rtas_read_config(pdn
, where
, size
, val
);
716 * pseries_eeh_write_config - Write PCI config space
717 * @pdn: PCI device node
718 * @where: PCI address
719 * @size: size to write
720 * @val: value to be written
722 * Write config space to the specified device
724 static int pseries_eeh_write_config(struct pci_dn
*pdn
, int where
, int size
, u32 val
)
726 return rtas_write_config(pdn
, where
, size
, val
);
729 static int pseries_eeh_restore_config(struct pci_dn
*pdn
)
731 struct eeh_dev
*edev
= pdn_to_eeh_dev(pdn
);
738 * FIXME: The MPS, error routing rules, timeout setting are worthy
739 * to be exported by firmware in extendible way.
742 ret
= eeh_restore_vf_config(pdn
);
745 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
746 __func__
, edev
->pe_config_addr
, ret
);
753 #ifdef CONFIG_PCI_IOV
754 int pseries_send_allow_unfreeze(struct pci_dn
*pdn
,
755 u16
*vf_pe_array
, int cur_vfs
)
758 int ibm_allow_unfreeze
= rtas_token("ibm,open-sriov-allow-unfreeze");
759 unsigned long buid
, addr
;
761 addr
= rtas_config_addr(pdn
->busno
, pdn
->devfn
, 0);
762 buid
= pdn
->phb
->buid
;
763 spin_lock(&rtas_data_buf_lock
);
764 memcpy(rtas_data_buf
, vf_pe_array
, RTAS_DATA_BUF_SIZE
);
765 rc
= rtas_call(ibm_allow_unfreeze
, 5, 1, NULL
,
769 rtas_data_buf
, cur_vfs
* sizeof(u16
));
770 spin_unlock(&rtas_data_buf_lock
);
772 pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
774 pdn
->phb
->global_number
, addr
, rc
);
778 static int pseries_call_allow_unfreeze(struct eeh_dev
*edev
)
780 struct pci_dn
*pdn
, *tmp
, *parent
, *physfn_pdn
;
781 int cur_vfs
= 0, rc
= 0, vf_index
, bus
, devfn
;
784 vf_pe_array
= kzalloc(RTAS_DATA_BUF_SIZE
, GFP_KERNEL
);
787 if (pci_num_vf(edev
->physfn
? edev
->physfn
: edev
->pdev
)) {
788 if (edev
->pdev
->is_physfn
) {
789 cur_vfs
= pci_num_vf(edev
->pdev
);
790 pdn
= eeh_dev_to_pdn(edev
);
791 parent
= pdn
->parent
;
792 for (vf_index
= 0; vf_index
< cur_vfs
; vf_index
++)
793 vf_pe_array
[vf_index
] =
794 cpu_to_be16(pdn
->pe_num_map
[vf_index
]);
795 rc
= pseries_send_allow_unfreeze(pdn
, vf_pe_array
,
797 pdn
->last_allow_rc
= rc
;
798 for (vf_index
= 0; vf_index
< cur_vfs
; vf_index
++) {
799 list_for_each_entry_safe(pdn
, tmp
,
802 bus
= pci_iov_virtfn_bus(edev
->pdev
,
804 devfn
= pci_iov_virtfn_devfn(edev
->pdev
,
806 if (pdn
->busno
!= bus
||
809 pdn
->last_allow_rc
= rc
;
813 pdn
= pci_get_pdn(edev
->pdev
);
814 vf_pe_array
[0] = cpu_to_be16(pdn
->pe_number
);
815 physfn_pdn
= pci_get_pdn(edev
->physfn
);
816 rc
= pseries_send_allow_unfreeze(physfn_pdn
,
818 pdn
->last_allow_rc
= rc
;
826 static int pseries_notify_resume(struct pci_dn
*pdn
)
828 struct eeh_dev
*edev
= pdn_to_eeh_dev(pdn
);
833 if (rtas_token("ibm,open-sriov-allow-unfreeze")
834 == RTAS_UNKNOWN_SERVICE
)
837 if (edev
->pdev
->is_physfn
|| edev
->pdev
->is_virtfn
)
838 return pseries_call_allow_unfreeze(edev
);
844 static struct eeh_ops pseries_eeh_ops
= {
846 .init
= pseries_eeh_init
,
847 .probe
= pseries_eeh_probe
,
848 .set_option
= pseries_eeh_set_option
,
849 .get_pe_addr
= pseries_eeh_get_pe_addr
,
850 .get_state
= pseries_eeh_get_state
,
851 .reset
= pseries_eeh_reset
,
852 .wait_state
= pseries_eeh_wait_state
,
853 .get_log
= pseries_eeh_get_log
,
854 .configure_bridge
= pseries_eeh_configure_bridge
,
856 .read_config
= pseries_eeh_read_config
,
857 .write_config
= pseries_eeh_write_config
,
859 .restore_config
= pseries_eeh_restore_config
,
860 #ifdef CONFIG_PCI_IOV
861 .notify_resume
= pseries_notify_resume
866 * eeh_pseries_init - Register platform dependent EEH operations
868 * EEH initialization on pseries platform. This function should be
869 * called before any EEH related functions.
871 static int __init
eeh_pseries_init(void)
875 ret
= eeh_ops_register(&pseries_eeh_ops
);
877 pr_info("EEH: pSeries platform initialized\n");
879 pr_info("EEH: pSeries platform initialization failure (%d)\n",
884 machine_early_initcall(pseries
, eeh_pseries_init
);