// SPDX-License-Identifier: GPL-2.0-only
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
40 #define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
41 (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))
43 /* SAL 3.2 adds support for extended config space. */
45 #define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
46 (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
48 int raw_pci_read(unsigned int seg
, unsigned int bus
, unsigned int devfn
,
49 int reg
, int len
, u32
*value
)
54 if (!value
|| (seg
> 65535) || (bus
> 255) || (devfn
> 255) || (reg
> 4095))
57 if ((seg
| reg
) <= 255) {
58 addr
= PCI_SAL_ADDRESS(seg
, bus
, devfn
, reg
);
60 } else if (sal_revision
>= SAL_VERSION_CODE(3,2)) {
61 addr
= PCI_SAL_EXT_ADDRESS(seg
, bus
, devfn
, reg
);
67 result
= ia64_sal_pci_config_read(addr
, mode
, len
, &data
);
75 int raw_pci_write(unsigned int seg
, unsigned int bus
, unsigned int devfn
,
76 int reg
, int len
, u32 value
)
81 if ((seg
> 65535) || (bus
> 255) || (devfn
> 255) || (reg
> 4095))
84 if ((seg
| reg
) <= 255) {
85 addr
= PCI_SAL_ADDRESS(seg
, bus
, devfn
, reg
);
87 } else if (sal_revision
>= SAL_VERSION_CODE(3,2)) {
88 addr
= PCI_SAL_EXT_ADDRESS(seg
, bus
, devfn
, reg
);
93 result
= ia64_sal_pci_config_write(addr
, mode
, len
, value
);
99 static int pci_read(struct pci_bus
*bus
, unsigned int devfn
, int where
,
100 int size
, u32
*value
)
102 return raw_pci_read(pci_domain_nr(bus
), bus
->number
,
103 devfn
, where
, size
, value
);
106 static int pci_write(struct pci_bus
*bus
, unsigned int devfn
, int where
,
109 return raw_pci_write(pci_domain_nr(bus
), bus
->number
,
110 devfn
, where
, size
, value
);
113 struct pci_ops pci_root_ops
= {
118 struct pci_root_info
{
119 struct acpi_pci_root_info common
;
120 struct pci_controller controller
;
121 struct list_head io_resources
;
124 static unsigned int new_space(u64 phys_base
, int sparse
)
130 return 0; /* legacy I/O port space */
132 mmio_base
= (u64
) ioremap(phys_base
, 0);
133 for (i
= 0; i
< num_io_spaces
; i
++)
134 if (io_space
[i
].mmio_base
== mmio_base
&&
135 io_space
[i
].sparse
== sparse
)
138 if (num_io_spaces
== MAX_IO_SPACES
) {
139 pr_err("PCI: Too many IO port spaces "
140 "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES
);
145 io_space
[i
].mmio_base
= mmio_base
;
146 io_space
[i
].sparse
= sparse
;
151 static int add_io_space(struct device
*dev
, struct pci_root_info
*info
,
152 struct resource_entry
*entry
)
154 struct resource_entry
*iospace
;
155 struct resource
*resource
, *res
= entry
->res
;
157 unsigned long base
, min
, max
, base_port
;
158 unsigned int sparse
= 0, space_nr
, len
;
160 len
= strlen(info
->common
.name
) + 32;
161 iospace
= resource_list_create_entry(NULL
, len
);
163 dev_err(dev
, "PCI: No memory for %s I/O port space\n",
168 if (res
->flags
& IORESOURCE_IO_SPARSE
)
170 space_nr
= new_space(entry
->offset
, sparse
);
174 name
= (char *)(iospace
+ 1);
175 min
= res
->start
- entry
->offset
;
176 max
= res
->end
- entry
->offset
;
177 base
= __pa(io_space
[space_nr
].mmio_base
);
178 base_port
= IO_SPACE_BASE(space_nr
);
179 snprintf(name
, len
, "%s I/O Ports %08lx-%08lx", info
->common
.name
,
180 base_port
+ min
, base_port
+ max
);
183 * The SDM guarantees the legacy 0-64K space is sparse, but if the
184 * mapping is done by the processor (not the bridge), ACPI may not
190 resource
= iospace
->res
;
191 resource
->name
= name
;
192 resource
->flags
= IORESOURCE_MEM
;
193 resource
->start
= base
+ (sparse
? IO_SPACE_SPARSE_ENCODING(min
) : min
);
194 resource
->end
= base
+ (sparse
? IO_SPACE_SPARSE_ENCODING(max
) : max
);
195 if (insert_resource(&iomem_resource
, resource
)) {
197 "can't allocate host bridge io space resource %pR\n",
202 entry
->offset
= base_port
;
203 res
->start
= min
+ base_port
;
204 res
->end
= max
+ base_port
;
205 resource_list_add_tail(iospace
, &info
->io_resources
);
210 resource_list_free_entry(iospace
);
215 * An IO port or MMIO resource assigned to a PCI host bridge may be
216 * consumed by the host bridge itself or available to its child
217 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
218 * to tell whether the resource is consumed by the host bridge itself,
219 * but firmware hasn't used that bit consistently, so we can't rely on it.
221 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
222 * to be available to child bus/devices except one special case:
223 * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
224 * to access PCI configuration space.
226 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
228 static bool resource_is_pcicfg_ioport(struct resource
*res
)
230 return (res
->flags
& IORESOURCE_IO
) &&
231 res
->start
== 0xCF8 && res
->end
== 0xCFF;
234 static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info
*ci
)
236 struct device
*dev
= &ci
->bridge
->dev
;
237 struct pci_root_info
*info
;
238 struct resource
*res
;
239 struct resource_entry
*entry
, *tmp
;
242 status
= acpi_pci_probe_root_resources(ci
);
244 info
= container_of(ci
, struct pci_root_info
, common
);
245 resource_list_for_each_entry_safe(entry
, tmp
, &ci
->resources
) {
247 if (res
->flags
& IORESOURCE_MEM
) {
249 * HP's firmware has a hack to work around a
250 * Windows bug. Ignore these tiny memory ranges.
252 if (resource_size(res
) <= 16) {
253 resource_list_del(entry
);
254 insert_resource(&iomem_resource
,
256 resource_list_add_tail(entry
,
257 &info
->io_resources
);
259 } else if (res
->flags
& IORESOURCE_IO
) {
260 if (resource_is_pcicfg_ioport(entry
->res
))
261 resource_list_destroy_entry(entry
);
262 else if (add_io_space(dev
, info
, entry
))
263 resource_list_destroy_entry(entry
);
271 static void pci_acpi_root_release_info(struct acpi_pci_root_info
*ci
)
273 struct pci_root_info
*info
;
274 struct resource_entry
*entry
, *tmp
;
276 info
= container_of(ci
, struct pci_root_info
, common
);
277 resource_list_for_each_entry_safe(entry
, tmp
, &info
->io_resources
) {
278 release_resource(entry
->res
);
279 resource_list_destroy_entry(entry
);
284 static struct acpi_pci_root_ops pci_acpi_root_ops
= {
285 .pci_ops
= &pci_root_ops
,
286 .release_info
= pci_acpi_root_release_info
,
287 .prepare_resources
= pci_acpi_root_prepare_resources
,
290 struct pci_bus
*pci_acpi_scan_root(struct acpi_pci_root
*root
)
292 struct acpi_device
*device
= root
->device
;
293 struct pci_root_info
*info
;
295 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
297 dev_err(&device
->dev
,
298 "pci_bus %04x:%02x: ignored (out of memory)\n",
299 root
->segment
, (int)root
->secondary
.start
);
303 info
->controller
.segment
= root
->segment
;
304 info
->controller
.companion
= device
;
305 info
->controller
.node
= acpi_get_node(device
->handle
);
306 INIT_LIST_HEAD(&info
->io_resources
);
307 return acpi_pci_root_create(root
, &pci_acpi_root_ops
,
308 &info
->common
, &info
->controller
);
311 int pcibios_root_bridge_prepare(struct pci_host_bridge
*bridge
)
314 * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
315 * here, pci_create_root_bus() has been called by someone else and
316 * sysdata is likely to be different from what we expect. Let it go in
319 if (!bridge
->dev
.parent
) {
320 struct pci_controller
*controller
= bridge
->bus
->sysdata
;
321 ACPI_COMPANION_SET(&bridge
->dev
, controller
->companion
);
326 void pcibios_fixup_device_resources(struct pci_dev
*dev
)
333 for (idx
= 0; idx
< PCI_BRIDGE_RESOURCES
; idx
++) {
334 struct resource
*r
= &dev
->resource
[idx
];
336 if (!r
->flags
|| r
->parent
|| !r
->start
)
339 pci_claim_resource(dev
, idx
);
342 EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources
);
344 static void pcibios_fixup_bridge_resources(struct pci_dev
*dev
)
351 for (idx
= PCI_BRIDGE_RESOURCES
; idx
< PCI_NUM_RESOURCES
; idx
++) {
352 struct resource
*r
= &dev
->resource
[idx
];
354 if (!r
->flags
|| r
->parent
|| !r
->start
)
357 pci_claim_bridge_resource(dev
, idx
);
362 * Called after each bus is probed, but before its children are examined.
364 void pcibios_fixup_bus(struct pci_bus
*b
)
369 pci_read_bridge_bases(b
);
370 pcibios_fixup_bridge_resources(b
->self
);
372 list_for_each_entry(dev
, &b
->devices
, bus_list
)
373 pcibios_fixup_device_resources(dev
);
/* Notify the ACPI layer that a PCI bus has been added. */
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
/* Notify the ACPI layer that a PCI bus is going away. */
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
void pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
/*
 * Enable the resources of @dev selected by @mask and, unless the device
 * runs on MSI, route its legacy interrupt through ACPI.
 * Returns 0 on success or a negative errno.
 */
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!pci_dev_msi_enabled(dev))
		return acpi_pci_irq_enable(dev);
	return 0;
}
406 pcibios_disable_device (struct pci_dev
*dev
)
408 BUG_ON(atomic_read(&dev
->enable_cnt
));
409 if (!pci_dev_msi_enabled(dev
))
410 acpi_pci_irq_disable(dev
);
414 * pci_get_legacy_mem - generic legacy mem routine
415 * @bus: bus to get legacy memory base address for
417 * Find the base of legacy memory for @bus. This is typically the first
418 * megabyte of bus address space for @bus or is simply 0 on platforms whose
419 * chipsets support legacy I/O and memory routing. Returns the base address
420 * or an error pointer if an error occurred.
422 * This is the ia64 generic version of this routine. Other platforms
423 * are free to override it with a machine vector.
425 char *pci_get_legacy_mem(struct pci_bus
*bus
)
427 return (char *)__IA64_UNCACHED_OFFSET
;
431 * pci_mmap_legacy_page_range - map legacy memory space to userland
432 * @bus: bus whose legacy space we're mapping
433 * @vma: vma passed in by mmap
435 * Map legacy memory space for this device back to userspace using a machine
436 * vector to get the base address.
439 pci_mmap_legacy_page_range(struct pci_bus
*bus
, struct vm_area_struct
*vma
,
440 enum pci_mmap_state mmap_state
)
442 unsigned long size
= vma
->vm_end
- vma
->vm_start
;
446 /* We only support mmap'ing of legacy memory space */
447 if (mmap_state
!= pci_mmap_mem
)
451 * Avoid attribute aliasing. See Documentation/ia64/aliasing.rst
454 if (!valid_mmap_phys_addr_range(vma
->vm_pgoff
, size
))
456 prot
= phys_mem_access_prot(NULL
, vma
->vm_pgoff
, size
,
459 addr
= pci_get_legacy_mem(bus
);
461 return PTR_ERR(addr
);
463 vma
->vm_pgoff
+= (unsigned long)addr
>> PAGE_SHIFT
;
464 vma
->vm_page_prot
= prot
;
466 if (remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
467 size
, vma
->vm_page_prot
))
474 * pci_legacy_read - read from legacy I/O space
476 * @port: legacy port value
477 * @val: caller allocated storage for returned value
478 * @size: number of bytes to read
480 * Simply reads @size bytes from @port and puts the result in @val.
482 * Again, this (and the write routine) are generic versions that can be
483 * overridden by the platform. This is necessary on platforms that don't
484 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
486 int pci_legacy_read(struct pci_bus
*bus
, u16 port
, u32
*val
, u8 size
)
509 * pci_legacy_write - perform a legacy I/O write
511 * @port: port to write
512 * @val: value to write
513 * @size: number of bytes to write from @val
515 * Simply writes @size bytes of @val to @port.
517 int pci_legacy_write(struct pci_bus
*bus
, u16 port
, u32 val
, u8 size
)
540 * set_pci_cacheline_size - determine cacheline size for PCI devices
542 * We want to use the line-size of the outer-most cache. We assume
543 * that this line-size is the same for all CPUs.
545 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
547 static void __init
set_pci_dfl_cacheline_size(void)
549 unsigned long levels
, unique_caches
;
551 pal_cache_config_info_t cci
;
553 status
= ia64_pal_cache_summary(&levels
, &unique_caches
);
555 pr_err("%s: ia64_pal_cache_summary() failed "
556 "(status=%ld)\n", __func__
, status
);
560 status
= ia64_pal_cache_config_info(levels
- 1,
561 /* cache_type (data_or_unified)= */ 2, &cci
);
563 pr_err("%s: ia64_pal_cache_config_info() failed "
564 "(status=%ld)\n", __func__
, status
);
567 pci_dfl_cache_line_size
= (1 << cci
.pcci_line_size
) / 4;
570 static int __init
pcibios_init(void)
572 set_pci_dfl_cacheline_size();
576 subsys_initcall(pcibios_init
);