/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
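/*
 * Illustrative example (added, not from the original source): in the
 * compatibility format, the config address for segment 0, bus 2,
 * devfn 0x18, register 0x44 packs as
 *	PCI_SAL_ADDRESS(0, 2, 0x18, 0x44)
 *	  = (2 << 16) | (0x18 << 8) | 0x44 = 0x21844
 * The extended format widens each field by 4 bits so the 4096-byte
 * PCI Express config space (reg up to 0xfff) fits.
 */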
static int
pci_sal_read (unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
static int
pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}
static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;
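/*
 * Usage sketch (added for illustration): callers that need config access
 * before a struct pci_dev exists can go through the raw ops directly,
 * e.g. to read the vendor ID of device 0, function 0 on segment 0, bus 0:
 *
 *	u32 vendor;
 *	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &vendor);
 */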
static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size,
	  u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}
static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size,
	   u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}
struct pci_root_info {
	struct pci_controller *controller;
	char *name;
};
static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
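/*
 * Note (added commentary): each io_space[] entry describes one I/O port
 * space, keyed by the uncached kernel virtual address that ioremap()
 * returned for the bridge's translation offset.  new_space() reuses an
 * existing entry when both the MMIO base and the sparse/dense encoding
 * match, so every distinct bridge translation gets exactly one slot.
 */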
static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	u64 base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}
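/*
 * Illustrative example (added, assuming the IO_SPACE_SPARSE_ENCODING
 * definition in asm/io.h of this era, ((p >> 2) << 12) | (p & 0xfff)):
 * port 0x1000 in a sparse space maps to MMIO offset
 * ((0x1000 >> 2) << 12) | 0 = 0x400000, i.e. the port numbers are
 * spread out so each group of 4 ports lands on its own 4K page.
 */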
static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
	struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}
static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}
static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			window->resource.start, window->resource.end,
			root->name, info->name);
	}

	return AE_OK;
}
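/*
 * Worked example (added for illustration): a bridge whose _CRS reports a
 * memory window with minimum 0x80000000, length 0x10000000, and
 * translation_offset 0x100000000 produces a host window covering CPU
 * addresses 0x180000000-0x18fffffff; window->offset records the
 * CPU-to-bus delta so the resource/bus conversions below can undo it.
 */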
static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
	int i, j;

	j = 0;
	for (i = 0; i < ctrl->windows; i++) {
		struct resource *res = &ctrl->window[i].resource;
		/* HP's firmware has a hack to work around a Windows bug.
		 * Ignore these tiny memory ranges */
		if ((res->flags & IORESOURCE_MEM) &&
		    (res->end - res->start < 16))
			continue;
		if (j >= PCI_BUS_NUM_RESOURCES) {
			printk("Ignoring range [%lx-%lx] (%lx)\n", res->start,
					res->end, res->flags);
			continue;
		}
		bus->resource[j++] = res;
	}
}
struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
{
	struct pci_root_info info;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		controller->window =
			kmalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;
	}

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		goto out3;

	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
	info.controller = controller;
	info.name = name;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window,
			&info);

	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
	if (pbus)
		pcibios_setup_root_windows(pbus, controller);

	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}
void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > res->start)
			continue;
		if (window->resource.end < res->end)
			continue;
		offset = window->offset;
		break;
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);
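/*
 * Example (added commentary, continuing the numbers from the window
 * example above): with window->offset == 0x100000000, a device resource
 * at CPU address 0x180000000 converts to bus address 0x80000000, and
 * pcibios_bus_to_resource() below applies the offset in the opposite
 * direction.
 */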
void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start - window->offset > region->start)
			continue;
		if (window->resource.end - window->offset < region->end)
			continue;
		offset = window->offset;
		break;
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx];

	if (!dev->bus)
		return 0;
	for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *busr = dev->bus->resource[i];

		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
				(devr->end <= busr->end))
			return 1;
	}
	return 0;
}
static void __devinit
pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	struct pci_bus_region region;
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		region.start = dev->resource[i].start;
		region.end = dev->resource[i].end;
		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
		if ((is_valid_resource(dev, i)))
			pci_claim_resource(dev, i);
	}
}
void __devinit
pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void __devinit
pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
/*
 * Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}
void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown.  */
}
static inline int
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx<PCI_NUM_RESOURCES; idx++) {
		/* Only set up the desired resources.  */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & type_mask))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
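/*
 * Example (added): a device with one I/O BAR and one memory BAR whose
 * command register reads 0x0000 gets PCI_COMMAND_IO (bit 0) and
 * PCI_COMMAND_MEMORY (bit 1) set, so the printk above would report
 * "(0000 -> 0003)".
 */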
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pcibios_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}
void
pcibios_disable_device (struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}
void
pcibios_align_resource (void *data, struct resource *res,
			resource_size_t size, resource_size_t align)
{
}
/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup (char *str)
{
	return str;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
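/*
 * Context note (added): this routine is reached when user space mmap()s
 * a BAR, e.g. through the resourceN files exposed by the PCI sysfs code,
 * so the attribute checks above are what keep a user mapping from
 * aliasing kernel mappings with a different memory attribute.
 */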
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
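/*
 * Note (added): __IA64_UNCACHED_OFFSET is the base of the kernel's
 * uncached identity mapping, so returning it makes bus address 0 of the
 * legacy range correspond to physical address 0 accessed uncached.
 */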
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
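/*
 * Usage note (added, assuming the HAVE_PCI_LEGACY sysfs wiring of this
 * era): these two routines back the per-bus legacy_io files created by
 * the PCI sysfs code, so a user-space read of that file ends up in
 * ia64_pci_legacy_read() with the file offset as the port number.
 */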
/* It's defined in drivers/pci/pci.c */
extern u8 pci_cache_line_size;
/**
 * set_pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init
set_pci_cacheline_size(void)
{
	u64 levels, unique_caches;
	s64 status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __FUNCTION__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __FUNCTION__, status);
		return;
	}
	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
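/*
 * Example (added): pcci_line_size is log2 of the line size in bytes, so
 * a 128-byte outermost cache line (pcci_line_size == 7) yields
 * pci_cache_line_size = (1 << 7) / 4 = 32, i.e. 32 dwords, the unit the
 * PCI cache line size register uses.
 */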
static int __init
pcibios_init(void)
{
	set_pci_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);