/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>		/* io_space[], IO_SPACE_BASE() */
#include <asm/sal.h>		/* ia64_sal_pci_config_{read,write}() */
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
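
/*
 * Worked example (illustrative values, not from the original source): for
 * seg 0, bus 2, devfn PCI_DEVFN(3, 0) = 0x18, reg 0x44, the compact
 * encoding yields
 *
 *	(0 << 24) | (2 << 16) | (0x18 << 8) | 0x44 = 0x00021844
 *
 * The extended encoding widens each field by 4 bits so the low 12 bits can
 * address the full 4096-byte PCIe config space:
 *
 *	(0 << 28) | (2 << 20) | (0x18 << 12) | 0x44 = 0x00218044
 */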
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		/* Compact encoding covers the first 256 bytes of config space. */
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else
		return -EINVAL;

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
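
/*
 * Usage sketch (hypothetical, for illustration only): reading the 32-bit
 * vendor/device ID word of device 0000:00:03.0 would look like
 *
 *	u32 id;
 *	raw_pci_read(0, 0, PCI_DEVFN(3, 0), PCI_VENDOR_ID, 4, &id);
 *
 * Most callers go through the pci_bus accessors below instead.
 */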
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else
		return -EINVAL;

	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
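
/*
 * Illustration (not from the original source): once a root bus is created
 * with these ops, a generic accessor such as
 *
 *	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
 *
 * is routed by the PCI core through pci_root_ops.read, i.e. pci_read()
 * above, and ultimately into the SAL call.
 */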
/* Called by ACPI when it finds a new root bus. */

static struct pci_controller *alloc_pci_controller(int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}
struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	struct list_head resources;
	struct resource *res;
	resource_size_t *res_offset;
	unsigned int res_num;
	struct list_head io_resources;
	char *name;
};
static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
		       MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
static u64 add_io_space(struct pci_root_info *info,
			struct acpi_resource_address64 *addr)
{
	struct iospace_resource *iospace;
	struct resource *resource;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->name) + 32;
	iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
	if (!iospace) {
		dev_err(&info->bridge->dev,
			"PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	name = (char *)(iospace + 1);

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * know that.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource = &iospace->res;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	list_add_tail(&iospace->list, &info->io_resources);
	return base_port;

free_resource:
	kfree(iospace);
out:
	return ~0;
}
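
/*
 * Sketch of the sparse encoding (illustrative; the exact macro lives in
 * asm/io.h): IO_SPACE_SPARSE_ENCODING() spreads port numbers out so that
 * each 4-byte port lands in its own 4KB page of the MMIO window, e.g.
 * port 0x1000 maps to offset ((0x1000 >> 2) << 12) | (0x1000 & 0xfff)
 * = 0x400000.
 */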
static acpi_status resource_to_window(struct acpi_resource *resource,
				      struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}
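
/*
 * For illustration (hypothetical firmware, not from this file), a matching
 * _CRS descriptor in ASL might look like:
 *
 *	QWordMemory (ResourceProducer, PosDecode, MinFixed, MaxFixed,
 *		     Cacheable, ReadWrite,
 *		     0x0, 0x80000000, 0xffffffff, 0x0, 0x80000000)
 *
 * i.e. a 2GB MMIO window at 0x80000000 routed downstream by the bridge.
 */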
static acpi_status count_window(struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}
static acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *resource;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	}

	resource = &info->res[info->res_num];
	resource->name = info->name;
	resource->flags = flags;
	resource->start = addr.minimum + offset;
	resource->end = resource->start + addr.address_length - 1;
	info->res_offset[info->res_num] = offset;

	if (insert_resource(root, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			resource);
	} else {
		if (offset)
			dev_info(&info->bridge->dev,
				 "host bridge window %pR (PCI address [%#llx-%#llx])\n",
				 resource,
				 resource->start - offset,
				 resource->end - offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n", resource);
	}

	/*
	 * HP's firmware has a hack to work around a Windows bug.
	 * Ignore these tiny memory ranges.
	 */
	if (!((resource->flags & IORESOURCE_MEM) &&
	      (resource->end - resource->start < 16)))
		pci_add_resource_offset(&info->resources, resource,
					info->res_offset[info->res_num]);

	info->res_num++;
	return AE_OK;
}
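
/*
 * Worked example (illustrative numbers): if firmware reports an I/O window
 * with minimum 0x2000 and a translation offset that add_io_space() turns
 * into base_port 0x1000000, the resource covers CPU-side ports starting at
 * 0x1002000 while the PCI bus still sees 0x2000; the offset recorded in
 * res_offset[] is what pci_add_resource_offset() hands to the core so BAR
 * assignments are translated correctly.
 */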
static void free_pci_root_info_res(struct pci_root_info *info)
{
	struct iospace_resource *iospace, *tmp;

	list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)
		kfree(iospace);

	kfree(info->name);
	kfree(info->res);
	info->res = NULL;

	kfree(info->res_offset);
	info->res_offset = NULL;

	info->res_num = 0;
	kfree(info->controller);
	info->controller = NULL;
}
static void __release_pci_root_info(struct pci_root_info *info)
{
	int i;
	struct resource *res;
	struct iospace_resource *iospace;

	list_for_each_entry(iospace, &info->io_resources, list)
		release_resource(&iospace->res);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (!res->parent)
			continue;

		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
			continue;

		release_resource(res);
	}

	free_pci_root_info_res(info);
	kfree(info);
}
static void release_pci_root_info(struct pci_host_bridge *bridge)
{
	struct pci_root_info *info = bridge->release_data;

	__release_pci_root_info(info);
}
static int
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
		    int busnum, int domain)
{
	char *name;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
	info->bridge = device;
	info->name = name;

	/* First pass: count the windows so we can size the arrays. */
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			    &info->res_num);
	if (info->res_num) {
		info->res =
			kzalloc_node(sizeof(*info->res) * info->res_num,
				     GFP_KERNEL, info->controller->node);
		if (!info->res) {
			kfree(name);
			return -ENOMEM;
		}

		info->res_offset =
			kzalloc_node(sizeof(*info->res_offset) * info->res_num,
				     GFP_KERNEL, info->controller->node);
		if (!info->res_offset) {
			kfree(name);
			kfree(info->res);
			info->res = NULL;
			return -ENOMEM;
		}

		/* Second pass: fill in the windows. */
		info->res_num = 0;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				    add_window, info);
	} else
		kfree(name);

	return 0;
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	struct pci_root_info *info = NULL;
	int busnum = root->secondary.start;
	struct pci_bus *pbus;
	int ret, pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		return NULL;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			domain, busnum);
		kfree(controller);
		return NULL;
	}

	info->controller = controller;
	INIT_LIST_HEAD(&info->io_resources);
	INIT_LIST_HEAD(&info->resources);

	ret = probe_pci_root_info(info, device, busnum, domain);
	if (ret) {
		kfree(info->controller);
		kfree(info);
		return NULL;
	}
	/* insert busn resource at first */
	pci_add_resource(&info->resources, &root->secondary);
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 has no
	 * such quirk, so we just ignore the case for now.
	 */
	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
				   &info->resources);
	if (!pbus) {
		pci_free_resource_list(&info->resources);
		__release_pci_root_info(info);
		return NULL;
	}

	pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
				    release_pci_root_info, info);
	pci_scan_child_bus(pbus);
	return pbus;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_controller *controller = bridge->bus->sysdata;

	ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
	return 0;
}
static int is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx], *busr;

	if (!dev->bus)
		return 0;

	pci_bus_for_each_resource(dev->bus, busr, i) {
		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
		    (devr->end <= busr->end))
			return 1;
	}
	return 0;
}
static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		if (is_valid_resource(dev, i))
			pci_claim_resource(dev, i);
	}
}
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}
void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	return res->start;
}
int
pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
		    enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped. But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
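
/*
 * Illustration (hypothetical userspace snippet, not part of this file):
 * a process maps BAR space through sysfs and ends up here, e.g.
 *
 *	int fd = open("/sys/bus/pci/devices/0000:00:03.0/resource0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * with write_combine set when "resource0_wc" is opened instead.
 */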
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus to write
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
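
/*
 * Usage sketch (illustrative only): reading the VGA miscellaneous output
 * register through the legacy path would look like
 *
 *	u32 misc;
 *	ia64_pci_legacy_read(bus, 0x3cc, &misc, 1);
 *
 * which is how legacy_io accesses from /proc and /sys reach port space here.
 */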
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified) */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
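
/*
 * Worked example (illustrative): if PAL reports pcci_line_size = 7, the
 * outer cache line is 1 << 7 = 128 bytes, so pci_dfl_cache_line_size
 * becomes 128 / 4 = 32 units of 32-bit words, matching the encoding of
 * the PCI_CACHE_LINE_SIZE config register.
 */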
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
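
/*
 * Worked example (illustrative, assuming 16KB pages): with 8GB of RAM,
 * max_pfn - 1 is just under 2^19, so high_totalram becomes 1 and the
 * computed mask is (1ULL << 32) + 0xffffffff = 0x1ffffffff, i.e. a
 * 33-bit DMA mask covering all of memory.
 */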
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);