/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))
/* SAL 3.2 adds support for extended config space. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
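
/*
 * Worked example (illustrative, not part of the original file): for seg 0,
 * bus 2, devfn 0x10 (slot 2, function 0), reg 0x40, the fields pack as
 * (0 << 24) | (2 << 16) | (0x10 << 8) | 0x40 == 0x21040; the SAL 3.2
 * extended form widens each field, giving 0x210040 instead. The function
 * name below is made up for illustration.
 */
#if 0	/* illustrative sketch only */
static void __init pci_sal_address_example(void)
{
	BUILD_BUG_ON(PCI_SAL_ADDRESS(0, 2, 0x10, 0x40) != 0x21040);
	BUILD_BUG_ON(PCI_SAL_EXT_ADDRESS(0, 2, 0x10, 0x40) != 0x210040);
}
#endif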
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
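
/*
 * Usage sketch (illustrative, not part of the original file): read the
 * vendor/device ID dword of device 0000:00:00.0 through the SAL accessor.
 * The caller and function name here are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void __init raw_pci_read_example(void)
{
	u32 id;

	if (raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id) == 0)
		pr_info("0000:00:00.0 vendor/device: %08x\n", id);
}
#endif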
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
struct pci_root_info {
	struct acpi_pci_root_info common;
	struct pci_controller controller;
	struct list_head io_resources;
};
static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
		       MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
static int add_io_space(struct device *dev, struct pci_root_info *info,
			struct resource_entry *entry)
{
	struct resource_entry *iospace;
	struct resource *resource, *res = entry->res;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->common.name) + 32;
	iospace = resource_list_create_entry(NULL, len);
	if (!iospace) {
		dev_err(dev, "PCI: No memory for %s I/O port space\n",
			info->common.name);
		return -ENOMEM;
	}

	if (res->flags & IORESOURCE_IO_SPARSE)
		sparse = 1;
	space_nr = new_space(entry->offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	name = (char *)(iospace + 1);
	min = res->start - entry->offset;
	max = res->end - entry->offset;
	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource = iospace->res;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	entry->offset = base_port;
	res->start = min + base_port;
	res->end = max + base_port;
	resource_list_add_tail(iospace, &info->io_resources);

	return 0;

free_resource:
	resource_list_free_entry(iospace);
	return -ENOSPC;
}
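
/*
 * Worked example (illustrative; IO_SPACE_SPARSE_ENCODING() is defined in
 * <asm/io.h>): a sparse space spreads each 4-byte run of ports onto its
 * own 4KB page, encoding port p as ((p >> 2) << 12) | (p & 0xfff), so
 * port 0x1000 lands at offset 0x400000 from the space's mmio_base. The
 * function name below is made up for illustration.
 */
#if 0	/* illustrative sketch only */
static void __init sparse_encoding_example(void)
{
	BUILD_BUG_ON(IO_SPACE_SPARSE_ENCODING(0x1000) != 0x400000);
}
#endif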
/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *	IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *	to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports [0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
	return (res->flags & IORESOURCE_IO) &&
		res->start == 0xCF8 && res->end == 0xCFF;
}
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct device *dev = &ci->bridge->dev;
	struct pci_root_info *info;
	struct resource *res;
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	if (status > 0) {
		info = container_of(ci, struct pci_root_info, common);
		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
			res = entry->res;
			if (res->flags & IORESOURCE_MEM) {
				/*
				 * HP's firmware has a hack to work around a
				 * Windows bug. Ignore these tiny memory ranges.
				 */
				if (resource_size(res) <= 16) {
					resource_list_del(entry);
					insert_resource(&iomem_resource,
							entry->res);
					resource_list_add_tail(entry,
							&info->io_resources);
				}
			} else if (res->flags & IORESOURCE_IO) {
				if (resource_is_pcicfg_ioport(entry->res))
					resource_list_destroy_entry(entry);
				else if (add_io_space(dev, info, entry))
					resource_list_destroy_entry(entry);
			}
		}
	}

	return status;
}
static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
	struct pci_root_info *info;
	struct resource_entry *entry, *tmp;

	info = container_of(ci, struct pci_root_info, common);
	resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
		release_resource(entry->res);
		resource_list_destroy_entry(entry);
	}
	kfree(info);
}
static struct acpi_pci_root_ops pci_acpi_root_ops = {
	.pci_ops = &pci_root_ops,
	.release_info = pci_acpi_root_release_info,
	.prepare_resources = pci_acpi_root_prepare_resources,
};
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			root->segment, (int)root->secondary.start);
		return NULL;
	}

	info->controller.segment = root->segment;
	info->controller.companion = device;
	info->controller.node = acpi_get_node(device->handle);
	INIT_LIST_HEAD(&info->io_resources);
	return acpi_pci_root_create(root, &pci_acpi_root_ops,
				    &info->common, &info->controller);
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/*
	 * We pass NULL as parent to pci_create_root_bus(), so if it is not
	 * NULL here, pci_create_root_bus() has been called by someone else
	 * and sysdata is likely to be different from what we expect.  Let it
	 * go in that case.
	 */
	if (!bridge->dev.parent) {
		struct pci_controller *controller = bridge->bus->sysdata;
		ACPI_COMPANION_SET(&bridge->dev, controller->companion);
	}
	return 0;
}
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_bridge_resource(dev, idx);
	}
}
/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}
void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
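
/*
 * Usage sketch (illustrative, hypothetical caller): read the VGA
 * miscellaneous output register (legacy port 0x3CC) through this routine;
 * on success the return value is the number of bytes read. The function
 * name below is made up for illustration.
 */
#if 0	/* illustrative sketch only */
static void legacy_read_example(struct pci_bus *bus)
{
	u32 misc;

	if (ia64_pci_legacy_read(bus, 0x3CC, &misc, 1) == 1)
		pr_info("VGA misc output: %02x\n", misc);
}
#endif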
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __func__, status);
		return;
	}

	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
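
/*
 * Worked example (illustrative, not in the original): pcci_line_size is
 * log2 of the line size in bytes and pci_dfl_cache_line_size counts
 * 32-bit words, so a 128-byte outermost cache line (pcci_line_size == 7)
 * yields (1 << 7) / 4 == 32.
 */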
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
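
/*
 * Worked example (illustrative, assuming ia64's default 16KB pages,
 * PAGE_SHIFT == 14): with 8GB of RAM, max_pfn == 1 << 19, so
 * high_totalram == (max_pfn - 1) >> 18 == 1 and the returned mask is
 * ((u64)1 << 32) + 0xffffffff == 0x1ffffffff.
 */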
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);