/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))
/* SAL 3.2 adds support for extended config space. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
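
/*
 * The compatible encoding above packs seg:bus:devfn:reg into 8:8:8:8 bits,
 * so it can only address segments 0-255 and register offsets 0-255.  The
 * SAL 3.2 extended encoding widens the layout to 16:8:8:12 bits, which
 * covers 16-bit segment numbers and the 4K extended configuration space.
 */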
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;	/* compatible configuration addressing */
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;	/* extended (SAL 3.2) configuration addressing */
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}
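
/*
 * Both accessors above hand the encoded address to SAL firmware, which
 * performs the actual configuration cycle; any SAL error is collapsed to
 * -EINVAL since callers only distinguish success from failure here.
 */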
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/* Called by ACPI when it finds a new root bus.  */
static struct pci_controller *alloc_pci_controller(int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	return controller;
}
struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	struct list_head resources;
	struct resource *res;
	resource_size_t *res_offset;
	unsigned int res_num;
	struct list_head io_resources;
	char *name;
};
static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
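
/*
 * ia64 has no port I/O instructions; inX()/outX() are implemented as
 * uncacheable loads and stores into per-bridge MMIO windows.  Each
 * io_space[] entry describes one such window, and the space number
 * returned above selects the slice of the kernel's logical port-number
 * range (see IO_SPACE_BASE) that maps onto it.
 */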
static u64 add_io_space(struct pci_root_info *info,
			struct acpi_resource_address64 *addr)
{
	struct iospace_resource *iospace;
	struct resource *resource;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->name) + 32;
	iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
	if (!iospace) {
		dev_err(&info->bridge->dev,
			"PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	name = (char *)(iospace + 1);

	min = addr->address.minimum;
	max = min + addr->address.address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->address.translation_offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource = &iospace->res;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	list_add_tail(&iospace->list, &info->io_resources);
	return base_port;

free_resource:
	kfree(iospace);
out:
	return ~0;
}
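
/*
 * For sparse translation, IO_SPACE_SPARSE_ENCODING() spreads the ports
 * out in MMIO space (each 4-byte port group lands on its own 4KB page),
 * which is why the iomem resource claimed above can be much larger than
 * the port range itself.
 */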
static acpi_status resource_to_window(struct acpi_resource *resource,
				      struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address.address_length)
		return AE_OK;

	return AE_ERROR;
}
static acpi_status count_window(struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}
static acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *resource;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.address.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	resource = &info->res[info->res_num];
	resource->name = info->name;
	resource->flags = flags;
	resource->start = addr.address.minimum + offset;
	resource->end = resource->start + addr.address.address_length - 1;
	info->res_offset[info->res_num] = offset;

	if (insert_resource(root, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			resource);
	} else {
		if (offset > 0)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 resource,
				 resource->start - offset,
				 resource->end - offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n", resource);
	}
	/* HP's firmware has a hack to work around a Windows bug.
	 * Ignore these tiny memory ranges */
	if (!((resource->flags & IORESOURCE_MEM) &&
	      (resource->end - resource->start < 16)))
		pci_add_resource_offset(&info->resources, resource,
					info->res_offset[info->res_num]);

	info->res_num++;
	return AE_OK;
}
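
/*
 * In add_window() above, 'offset' is the bus-to-CPU address translation
 * for the window; recording it via pci_add_resource_offset() lets the
 * PCI core convert between bus and CPU views when assigning BARs.
 */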
static void free_pci_root_info_res(struct pci_root_info *info)
{
	struct iospace_resource *iospace, *tmp;

	list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)
		kfree(iospace);

	kfree(info->name);
	kfree(info->res);
	info->res = NULL;

	kfree(info->res_offset);
	info->res_offset = NULL;

	info->res_num = 0;

	kfree(info->controller);
	info->controller = NULL;
}
static void __release_pci_root_info(struct pci_root_info *info)
{
	int i;
	struct resource *res;
	struct iospace_resource *iospace;

	list_for_each_entry(iospace, &info->io_resources, list)
		release_resource(&iospace->res);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (!res->parent)
			continue;

		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
			continue;

		release_resource(res);
	}

	free_pci_root_info_res(info);
	kfree(info);
}
static void release_pci_root_info(struct pci_host_bridge *bridge)
{
	struct pci_root_info *info = bridge->release_data;

	__release_pci_root_info(info);
}
static int
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
		    int busnum, int domain)
{
	char *name;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
	info->bridge = device;
	info->name = name;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			    &info->res_num);
	if (info->res_num) {
		info->res =
			kzalloc_node(sizeof(*info->res) * info->res_num,
				     GFP_KERNEL, info->controller->node);
		if (!info->res) {
			kfree(name);
			return -ENOMEM;
		}

		info->res_offset =
			kzalloc_node(sizeof(*info->res_offset) * info->res_num,
				     GFP_KERNEL, info->controller->node);
		if (!info->res_offset) {
			kfree(name);
			kfree(info->res);
			info->res = NULL;
			return -ENOMEM;
		}

		info->res_num = 0;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				    add_window, info);
	} else
		kfree(name);

	return 0;
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	struct pci_root_info *info = NULL;
	int busnum = root->secondary.start;
	struct pci_bus *pbus;
	int ret;

	controller = alloc_pci_controller(domain);
	if (!controller)
		return NULL;

	controller->companion = device;
	controller->node = acpi_get_node(device->handle);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			domain, busnum);
		kfree(controller);
		return NULL;
	}

	info->controller = controller;
	INIT_LIST_HEAD(&info->io_resources);
	INIT_LIST_HEAD(&info->resources);

	ret = probe_pci_root_info(info, device, busnum, domain);
	if (ret) {
		kfree(info->controller);
		kfree(info);
		return NULL;
	}
	/* insert busn resource at first */
	pci_add_resource(&info->resources, &root->secondary);
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 hasn't
	 * such quirk. So we just ignore the case now.
	 */
	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
				   &info->resources);
	if (!pbus) {
		pci_free_resource_list(&info->resources);
		__release_pci_root_info(info);
		return NULL;
	}

	pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
				    release_pci_root_info, info);
	pci_scan_child_bus(pbus);
	return pbus;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/*
	 * We pass NULL as parent to pci_create_root_bus(), so if it is not
	 * NULL here, pci_create_root_bus() has been called by someone else
	 * and sysdata is likely to be different from what we expect.  Let it
	 * go in that case.
	 */
	if (!bridge->dev.parent) {
		struct pci_controller *controller = bridge->bus->sysdata;
		ACPI_COMPANION_SET(&bridge->dev, controller->companion);
	}
	return 0;
}
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_bridge_resource(dev, idx);
	}
}
/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
void pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}
void
pcibios_disable_device (struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}
resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
			resource_size_t size, resource_size_t align)
{
	return res->start;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:	*val = inb(port);  break;
	case 2:	*val = inw(port);  break;
	case 4:	*val = inl(port);  break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:	outb(val, port);  break;
	case 2:	outw(val, port);  break;
	case 4:	outl(val, port);  break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
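
/*
 * The inb()/outb() family used above expands to uncacheable memory
 * accesses into the io_space[] windows set up earlier, so these legacy
 * accessors work even though ia64 has no port I/O instructions.
 */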
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init
set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
	/* pcci_line_size is log2(line size in bytes); the PCI cache line
	 * size register counts 32-bit words, hence the divide by 4. */
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
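
/*
 * Compute the narrowest DMA mask that still covers all of RAM.  For
 * example, with 8GB of RAM and 16KB pages (PAGE_SHIFT = 14), max_pfn - 1
 * is 0x7ffff, high_totalram works out to 1, and the resulting mask is
 * 0x1ffffffff.
 */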
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);