#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/hw_irq.h>

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and provide
 * a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
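
/*
 * Illustrative sketch (not part of this header): subsystems such as the SCSI
 * midlayer consult this flag when choosing a bounce-buffer limit, roughly:
 *
 *	u64 bounce_limit = BLK_BOUNCE_HIGH;
 *	if (!PCI_DMA_BUS_IS_PHYS)
 *		bounce_limit = BLK_BOUNCE_ANY;	// an IOMMU can reach any page
 *	blk_queue_bounce_limit(q, bounce_limit);
 *
 * The real call sites live outside this file; the snippet only shows the
 * intended effect of the flag.
 */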

static inline void
pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void
pcibios_penalize_isa_irq (int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#include <asm-generic/pci-dma-compat.h>

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
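
/*
 * Illustrative sketch (not part of this header, names are made up): a driver
 * that must remember enough state to unmap a streaming mapping later would
 * typically embed these helpers in its own bookkeeping structure, e.g. with
 * a hypothetical "struct ring_entry *entry":
 *
 *	struct ring_entry {
 *		void *vaddr;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	dma_addr_t dma = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);
 *	pci_unmap_addr_set(entry, mapping, dma);
 *	pci_unmap_len_set(entry, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(entry, mapping),
 *			 pci_unmap_len(entry, len), PCI_DMA_FROMDEVICE);
 */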

/* The ia64 platform always supports 64-bit addressing. */
#define pci_dac_dma_supported(pci_dev, mask)		(1)
#define pci_dac_page_to_dma(dev,pg,off,dir)		((dma_addr_t) page_to_bus(pg) + (off))
#define pci_dac_dma_to_page(dev,dma_addr)		(virt_to_page(bus_to_virt(dma_addr)))
#define pci_dac_dma_to_offset(dev,dma_addr)		offset_in_page(dma_addr)
#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)	do { } while (0)
#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)	do { mb(); } while (0)

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	/*
	 * PCI_CACHE_LINE_SIZE is in units of 32-bit words, so multiply by 4
	 * to get bytes; a value of 0 means no cacheline size has been
	 * programmed, in which case fall back to a generous 1024-byte
	 * boundary.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif
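
/*
 * Illustrative sketch (not part of this header): a driver interested in DMA
 * burst behaviour could query the advice as below; PCI_DMA_BURST_MULTIPLE
 * means bursts that are a multiple of *strategy_parameter bytes work well.
 *
 *	enum pci_dma_burst_strategy strat;
 *	unsigned long param;
 *
 *	pci_dma_burst_advice(pdev, &strat, &param);
 *	if (strat == PCI_DMA_BURST_MULTIPLE)
 *		tune_for_burst_multiple(param);	// hypothetical driver helper
 */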

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
				enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma);
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				  size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
				   size_t count);
extern int pci_mmap_legacy_mem(struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma);

#define pci_get_legacy_mem	platform_pci_get_legacy_mem
#define pci_legacy_read		platform_pci_legacy_read
#define pci_legacy_write	platform_pci_legacy_write

struct pci_window {
	struct resource resource;
	u64 offset;
};

struct pci_controller {
	void *acpi_handle;
	void *iommu;
	int segment;
	int node;		/* nearest node with memory or -1 for global allocation */

	unsigned int windows;
	struct pci_window *window;

	void *platform_data;
};

#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
#define pci_domain_nr(busdev)	(PCI_CONTROLLER(busdev)->segment)
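
/*
 * Illustrative sketch (not part of this header): given a struct pci_bus *bus
 * whose sysdata was set up by the ACPI root-bridge scan, the per-segment
 * controller and the PCI domain number are reached as
 *
 *	struct pci_controller *ctrl = PCI_CONTROLLER(bus);
 *	int domain = pci_domain_nr(bus);	// same value as ctrl->segment
 */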

extern struct pci_ops pci_root_ops;

static inline int pci_proc_domain(struct pci_bus *bus)
{
	return (pci_domain_nr(bus) != 0);
}

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

extern void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region);

static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

#define pcibios_scan_all_fns(a, b)	0

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}

#endif /* _ASM_IA64_PCI_H */