1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);
40 static int __init
pcibios_init(void)
42 struct pci_controller
*hose
, *tmp
;
44 printk(KERN_INFO
"PCI: Probing PCI hardware\n");
46 /* For now, override phys_mem_access_prot. If we need it,g
47 * later, we may move that initialization to each ppc_md
49 ppc_md
.phys_mem_access_prot
= pci_phys_mem_access_prot
;
51 /* On ppc64, we always enable PCI domains and we keep domain 0
52 * backward compatible in /proc for video cards
54 pci_add_flags(PCI_ENABLE_PROC_DOMAINS
| PCI_COMPAT_DOMAIN_0
);
56 /* Scan all of the recorded PCI controllers. */
57 list_for_each_entry_safe(hose
, tmp
, &hose_list
, list_node
)
58 pcibios_scan_phb(hose
);
60 /* Call common code to handle resource allocation */
61 pcibios_resource_survey();
64 list_for_each_entry_safe(hose
, tmp
, &hose_list
, list_node
)
65 pci_bus_add_devices(hose
->bus
);
67 /* Call machine dependent fixup */
68 if (ppc_md
.pcibios_fixup
)
69 ppc_md
.pcibios_fixup();
71 printk(KERN_DEBUG
"PCI: Probing PCI hardware done\n");
76 subsys_initcall(pcibios_init
);
78 int pcibios_unmap_io_space(struct pci_bus
*bus
)
80 struct pci_controller
*hose
;
84 /* If this is not a PHB, we only flush the hash table over
85 * the area mapped by this bridge. We don't play with the PTE
86 * mappings since we might have to deal with sub-page alignments
87 * so flushing the hash table is the only sane way to make sure
88 * that no hash entries are covering that removed bridge area
89 * while still allowing other busses overlapping those pages
91 * Note: If we ever support P2P hotplug on Book3E, we'll have
92 * to do an appropriate TLB flush here too
95 #ifdef CONFIG_PPC_BOOK3S_64
96 struct resource
*res
= bus
->resource
[0];
99 pr_debug("IO unmapping for PCI-PCI bridge %s\n",
100 pci_name(bus
->self
));
102 #ifdef CONFIG_PPC_BOOK3S_64
103 __flush_hash_table_range(res
->start
+ _IO_BASE
,
104 res
->end
+ _IO_BASE
+ 1);
109 /* Get the host bridge */
110 hose
= pci_bus_to_host(bus
);
112 pr_debug("IO unmapping for PHB %pOF\n", hose
->dn
);
113 pr_debug(" alloc=0x%p\n", hose
->io_base_alloc
);
115 iounmap(hose
->io_base_alloc
);
118 EXPORT_SYMBOL_GPL(pcibios_unmap_io_space
);
120 void __iomem
*ioremap_phb(phys_addr_t paddr
, unsigned long size
)
122 struct vm_struct
*area
;
125 WARN_ON_ONCE(paddr
& ~PAGE_MASK
);
126 WARN_ON_ONCE(size
& ~PAGE_MASK
);
129 * Let's allocate some IO space for that guy. We don't pass VM_IOREMAP
130 * because we don't care about alignment tricks that the core does in
131 * that case. Maybe we should due to stupid card with incomplete
132 * address decoding but I'd rather not deal with those outside of the
133 * reserved 64K legacy region.
135 area
= __get_vm_area_caller(size
, 0, PHB_IO_BASE
, PHB_IO_END
,
136 __builtin_return_address(0));
140 addr
= (unsigned long)area
->addr
;
141 if (ioremap_page_range(addr
, addr
+ size
, paddr
,
142 pgprot_noncached(PAGE_KERNEL
))) {
143 unmap_kernel_range(addr
, size
);
147 return (void __iomem
*)addr
;
149 EXPORT_SYMBOL_GPL(ioremap_phb
);
151 static int pcibios_map_phb_io_space(struct pci_controller
*hose
)
153 unsigned long phys_page
;
154 unsigned long size_page
;
155 unsigned long io_virt_offset
;
157 phys_page
= ALIGN_DOWN(hose
->io_base_phys
, PAGE_SIZE
);
158 size_page
= ALIGN(hose
->pci_io_size
, PAGE_SIZE
);
160 /* Make sure IO area address is clear */
161 hose
->io_base_alloc
= NULL
;
163 /* If there's no IO to map on that bus, get away too */
164 if (hose
->pci_io_size
== 0 || hose
->io_base_phys
== 0)
167 /* Let's allocate some IO space for that guy. We don't pass
168 * VM_IOREMAP because we don't care about alignment tricks that
169 * the core does in that case. Maybe we should due to stupid card
170 * with incomplete address decoding but I'd rather not deal with
171 * those outside of the reserved 64K legacy region.
173 hose
->io_base_alloc
= ioremap_phb(phys_page
, size_page
);
174 if (!hose
->io_base_alloc
)
176 hose
->io_base_virt
= hose
->io_base_alloc
+
177 hose
->io_base_phys
- phys_page
;
179 pr_debug("IO mapping for PHB %pOF\n", hose
->dn
);
180 pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
181 hose
->io_base_phys
, hose
->io_base_virt
, hose
->io_base_alloc
);
182 pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
183 hose
->pci_io_size
, size_page
);
185 /* Fixup hose IO resource */
186 io_virt_offset
= pcibios_io_space_offset(hose
);
187 hose
->io_resource
.start
+= io_virt_offset
;
188 hose
->io_resource
.end
+= io_virt_offset
;
190 pr_debug(" hose->io_resource=%pR\n", &hose
->io_resource
);
195 int pcibios_map_io_space(struct pci_bus
*bus
)
197 WARN_ON(bus
== NULL
);
199 /* If this not a PHB, nothing to do, page tables still exist and
200 * thus HPTEs will be faulted in when needed
203 pr_debug("IO mapping for PCI-PCI bridge %s\n",
204 pci_name(bus
->self
));
205 pr_debug(" virt=0x%016llx...0x%016llx\n",
206 bus
->resource
[0]->start
+ _IO_BASE
,
207 bus
->resource
[0]->end
+ _IO_BASE
);
211 return pcibios_map_phb_io_space(pci_bus_to_host(bus
));
213 EXPORT_SYMBOL_GPL(pcibios_map_io_space
);
/*
 * pcibios_setup_phb_io_space - arch hook: map a new PHB's I/O space.
 * @hose: the host bridge being set up
 */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
	pcibios_map_phb_io_space(hose);
}
/* Selector values for the pciconfig_iobase(2) syscall below */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4
226 SYSCALL_DEFINE3(pciconfig_iobase
, long, which
, unsigned long, in_bus
,
227 unsigned long, in_devfn
)
229 struct pci_controller
* hose
;
230 struct pci_bus
*tmp_bus
, *bus
= NULL
;
231 struct device_node
*hose_node
;
233 /* Argh ! Please forgive me for that hack, but that's the
234 * simplest way to get existing XFree to not lockup on some
235 * G5 machines... So when something asks for bus 0 io base
236 * (bus 0 is HT root), we return the AGP one instead.
238 if (in_bus
== 0 && of_machine_is_compatible("MacRISC4")) {
239 struct device_node
*agp
;
241 agp
= of_find_compatible_node(NULL
, NULL
, "u3-agp");
247 /* That syscall isn't quite compatible with PCI domains, but it's
248 * used on pre-domains setup. We return the first match
251 list_for_each_entry(tmp_bus
, &pci_root_buses
, node
) {
252 if (in_bus
>= tmp_bus
->number
&&
253 in_bus
<= tmp_bus
->busn_res
.end
) {
258 if (bus
== NULL
|| bus
->dev
.of_node
== NULL
)
261 hose_node
= bus
->dev
.of_node
;
262 hose
= PCI_DN(hose_node
)->phb
;
265 case IOBASE_BRIDGE_NUMBER
:
266 return (long)hose
->first_busno
;
268 return (long)hose
->mem_offset
[0];
270 return (long)hose
->io_base_phys
;
272 return (long)isa_io_base
;
#ifdef CONFIG_NUMA
/*
 * pcibus_to_node - NUMA node a PCI bus's host bridge is attached to.
 * @bus: any bus under the PHB of interest
 */
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif