/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)

#ifdef CONFIG_PCI_MSI
static int pnv_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	return (phb && phb->msi_map) ? 0 : -ENODEV;
}

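/*
 * Allocate one MSI from the PHB's bitmap: search for a free bit starting
 * at msi_next, wrapping around to the start of the map if nothing is free
 * past that point. Returns the hardware IRQ number (bit index plus
 * msi_base), or 0 when the map is exhausted.
 */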
static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
{
	unsigned int id;

	spin_lock(&phb->lock);
	id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
	if (id >= phb->msi_count && phb->msi_next)
		id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
	if (id >= phb->msi_count) {
		spin_unlock(&phb->lock);
		return 0;
	}
	__set_bit(id, phb->msi_map);
	spin_unlock(&phb->lock);
	return id + phb->msi_base;
}

static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
{
	unsigned int id;

	if (WARN_ON(hwirq < phb->msi_base ||
		    hwirq >= (phb->msi_base + phb->msi_count)))
		return;
	id = hwirq - phb->msi_base;
	spin_lock(&phb->lock);
	__clear_bit(id, phb->msi_map);
	spin_unlock(&phb->lock);
}

static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	unsigned int hwirq, virq;
	int rc;

	if (WARN_ON(!phb))
		return -ENODEV;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = pnv_get_one_msi(phb);
		if (!hwirq) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			pnv_put_msi(phb, hwirq);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
				    &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			pnv_put_msi(phb, hwirq);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		write_msi_msg(virq, &msg);
	}
	return 0;
}

static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		pnv_put_msi(phb, virq_to_hw(entry->irq));
		irq_dispose_mapping(entry->irq);
	}
}
#endif /* CONFIG_PCI_MSI */

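/*
 * The config space accessors below go through OPAL firmware calls. After
 * each access we check whether the PE was frozen by EEH because of an
 * error (no response) and, if so, clear the freeze so later accesses can
 * proceed; reads that fail return all-ones data.
 */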
static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
				     u32 bdfn)
{
	s64	rc;
	u8	fstate;
	u16	pcierr;
	u32	pe_no;

	/* Get PE# if we support IODA */
	pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;

	/* Read freeze status */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
					NULL);
	if (rc) {
		pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
			   " err %lld\n", phb->hose->global_number, pe_no, rc);
		return;
	}
	cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
		bdfn, pe_no, fstate);
	if (fstate != 0) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warning("PCI %d: Failed to clear EEH freeze state"
				   " for PE#%d, err %lld\n",
				   phb->hose->global_number, pe_no, rc);
		}
	}
}

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
	s64 rc;

	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		u16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
		break;
	}
	case 4: {
		u32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}
	cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
		bus->number, devfn, where, size, *val);

	/* Check if the PHB got frozen due to an error (no response) */
	pnv_pci_config_check_eeh(phb, bus, bdfn);

	return PCIBIOS_SUCCESSFUL;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;

	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
		bus->number, devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}
	/* Check if the PHB got frozen due to an error (no response) */
	pnv_pci_config_check_eeh(phb, bus, bdfn);

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

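/*
 * TCE (translation control entry) helpers: each 64-bit entry in the table
 * holds the real page number of the target page shifted into place plus
 * read/write permission bits. pnv_tce_build() fills entries directly in
 * memory and pnv_tce_free() clears them again.
 */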
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((u64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross LMB boundary */
		rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}

static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	u64 *tcep = ((u64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

static struct iommu_table * __devinit
pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
	struct iommu_table *tbl;
	const __be64 *basep;
	const __be32 *sizep;

	basep = of_get_property(hose->dn, "linux,tce-base", NULL);
	sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name);
		return NULL;
	}
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
	if (WARN_ON(!tbl))
		return NULL;
	pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
				  be32_to_cpup(sizep), 0);
	iommu_init_table(tbl, hose->node);
	return tbl;
}

static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose,
						 struct pci_dev *pdev)
{
	struct device_node *np = pci_bus_to_OF_node(hose->bus);
	struct pci_dn *pdn;

	if (np == NULL)
		return;
	pdn = PCI_DN(np);
	if (!pdn->iommu_table)
		pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
	if (!pdn->iommu_table)
		return;
	set_iommu_table_base(&pdev->dev, pdn->iommu_table);
}

static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/* If we have no phb structure, try to setup a fallback based on
	 * the device-tree (RTAS PCI for example)
	 */
	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
	else
		pnv_pci_dma_fallback_setup(hose, pdev);
}

static int pnv_pci_probe_mode(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	const __be64 *tstamp;
	u64 now, target;

	/* We hijack this as a way to ensure we have waited long
	 * enough since the reset was lifted on the PCI bus
	 */
	if (bus != hose->bus)
		return PCI_PROBE_NORMAL;
	tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
	if (!tstamp || !*tstamp)
		return PCI_PROBE_NORMAL;

	now = mftb() / tb_ticks_per_usec;
	target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
		+ PCI_RESET_DELAY_US;

	pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
		 hose->global_number, target, now);

	if (now < target)
		msleep((target - now + 999) / 1000);

	return PCI_PROBE_NORMAL;
}

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* We do not want to just probe */
	pci_probe_only = 0;

	/* OPAL absent, try POPAL first then RTAS detection of PHBs */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
#ifdef CONFIG_PPC_POWERNV_RTAS
		init_pci_config_tokens();
		find_and_init_phbs();
#endif /* CONFIG_PPC_POWERNV_RTAS */
	} else {
		/* OPAL is here, do our normal stuff */

		/* Look for p5ioc2 IO-Hubs */
		for_each_compatible_node(np, NULL, "ibm,p5ioc2")
			pnv_pci_init_p5ioc2_hub(np);
	}

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
	ppc_md.tce_build = pnv_tce_build;
	ppc_md.tce_free = pnv_tce_free;
	ppc_md.pci_probe_mode = pnv_pci_probe_mode;
	set_pci_dma_ops(&dma_iommu_ops);

	/* Configure MSIs */
#ifdef CONFIG_PCI_MSI
	ppc_md.msi_check_device = pnv_msi_check_device;
	ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}