/*
 * arch/arm/mach-mv78xx0/pcie.c
 *
 * PCIe functions for Marvell MV78xx0 SoCs
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include "common.h"
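
/*
 * Per-port state. Each MV78xx0 PCIe port is identified by a
 * (major, minor) pair and owns an I/O and a MEM resource carved out
 * of the global PCIe windows declared below.
 */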
struct pcie_port {
	u8			maj;
	u8			min;
	u8			root_bus_nr;
	void __iomem		*base;
	spinlock_t		conf_lock;
	char			io_space_name[16];
	char			mem_space_name[16];
	struct resource		res[2];
};

static struct pcie_port pcie_port[8];
static int num_pcie_ports;
static struct resource pcie_io_space;
static struct resource pcie_mem_space;
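
/*
 * Reserve the global PCIe I/O and MEM windows, split them into
 * per-port sub-resources (the MEM window is divided evenly according
 * to the number of ports that came up), and program a pair of CPU
 * address decode windows for each active port.
 */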
static void __init mv78xx0_pcie_preinit(void)
{
	int i;
	u32 size_each;
	u32 start;
	int win;

	pcie_io_space.name = "PCIe I/O Space";
	pcie_io_space.start = MV78XX0_PCIE_IO_PHYS_BASE(0);
	pcie_io_space.end =
		MV78XX0_PCIE_IO_PHYS_BASE(0) + MV78XX0_PCIE_IO_SIZE * 8 - 1;
	pcie_io_space.flags = IORESOURCE_IO;
	if (request_resource(&iomem_resource, &pcie_io_space))
		panic("can't allocate PCIe I/O space");

	pcie_mem_space.name = "PCIe MEM Space";
	pcie_mem_space.start = MV78XX0_PCIE_MEM_PHYS_BASE;
	pcie_mem_space.end =
		MV78XX0_PCIE_MEM_PHYS_BASE + MV78XX0_PCIE_MEM_SIZE - 1;
	pcie_mem_space.flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pcie_mem_space))
		panic("can't allocate PCIe MEM space");

	for (i = 0; i < num_pcie_ports; i++) {
		struct pcie_port *pp = pcie_port + i;

		snprintf(pp->io_space_name, sizeof(pp->io_space_name),
			 "PCIe %d.%d I/O", pp->maj, pp->min);
		pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
		pp->res[0].name = pp->io_space_name;
		pp->res[0].start = MV78XX0_PCIE_IO_PHYS_BASE(i);
		pp->res[0].end = pp->res[0].start + MV78XX0_PCIE_IO_SIZE - 1;
		pp->res[0].flags = IORESOURCE_IO;

		snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
			 "PCIe %d.%d MEM", pp->maj, pp->min);
		pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
		pp->res[1].name = pp->mem_space_name;
		pp->res[1].flags = IORESOURCE_MEM;
	}

	switch (num_pcie_ports) {
	case 0:
		size_each = 0;
		break;

	case 1:
		size_each = 0x30000000;
		break;

	case 2 ... 3:
		size_each = 0x10000000;
		break;

	case 4 ... 6:
		size_each = 0x08000000;
		break;

	case 7:
		size_each = 0x04000000;
		break;

	default:
		panic("invalid number of PCIe ports");
	}

	start = MV78XX0_PCIE_MEM_PHYS_BASE;
	for (i = 0; i < num_pcie_ports; i++) {
		struct pcie_port *pp = pcie_port + i;

		pp->res[1].start = start;
		pp->res[1].end = start + size_each - 1;
		start += size_each;
	}

	for (i = 0; i < num_pcie_ports; i++) {
		struct pcie_port *pp = pcie_port + i;

		if (request_resource(&pcie_io_space, &pp->res[0]))
			panic("can't allocate PCIe I/O sub-space");

		if (request_resource(&pcie_mem_space, &pp->res[1]))
			panic("can't allocate PCIe MEM sub-space");
	}

	win = 0;
	for (i = 0; i < num_pcie_ports; i++) {
		struct pcie_port *pp = pcie_port + i;

		mv78xx0_setup_pcie_io_win(win++, pp->res[0].start,
			pp->res[0].end - pp->res[0].start + 1,
			pp->maj, pp->min);

		mv78xx0_setup_pcie_mem_win(win++, pp->res[1].start,
			pp->res[1].end - pp->res[1].start + 1,
			pp->maj, pp->min);
	}
}
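
/*
 * Per-bus setup hook: record the root bus number, perform the generic
 * Orion PCIe unit setup and hand this port's I/O and MEM resources to
 * the PCI core. Returning 0 tells the core to skip controller indices
 * beyond the number of ports that were actually detected.
 */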
static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;

	if (nr >= num_pcie_ports)
		return 0;

	pp = &pcie_port[nr];
	pp->root_bus_nr = sys->busnr;

	/*
	 * Generic PCIe unit setup.
	 */
	orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
	orion_pcie_setup(pp->base, &mv78xx0_mbus_dram_info);

	sys->resource[0] = &pp->res[0];
	sys->resource[1] = &pp->res[1];
	sys->resource[2] = NULL;

	return 1;
}
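
/*
 * Map a bus number back to the port that owns it by picking the port
 * with the highest assigned root bus number that is still <= bus.
 */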
static struct pcie_port *bus_to_port(int bus)
{
	int i;

	for (i = num_pcie_ports - 1; i >= 0; i--) {
		int rbus = pcie_port[i].root_bus_nr;
		if (rbus != -1 && rbus <= bus)
			break;
	}

	return i >= 0 ? pcie_port + i : NULL;
}

static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
{
	/*
	 * Don't go out when trying to access nonexisting devices
	 * on the local bus.
	 */
	if (bus == pp->root_bus_nr && dev > 1)
		return 0;

	return 1;
}
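
/*
 * Configuration space accessors. Reads and writes are delegated to the
 * shared Orion PCIe helpers and serialized per port with conf_lock;
 * accesses to nonexistent devices on the local bus are filtered out
 * up front.
 */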
static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = bus_to_port(bus->number);
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_rd_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = bus_to_port(bus->number);
	unsigned long flags;
	int ret;

	if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	spin_lock_irqsave(&pp->conf_lock, flags);
	ret = orion_pcie_wr_conf(pp->base, bus, devfn, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static struct pci_ops pcie_ops = {
	.read = pcie_rd_conf,
	.write = pcie_wr_conf,
};
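
/*
 * The root complex shows up as an ordinary PCI device on the root bus;
 * clear its resources so the PCI core doesn't try to claim or assign
 * BARs for it.
 */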
static void __devinit rc_pci_fixup(struct pci_dev *dev)
{
	/*
	 * Prevent enumeration of root complex.
	 */
	if (dev->bus->parent == NULL && dev->devfn == 0) {
		int i;

		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end   = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
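
/*
 * Scan hook: only buses that correspond to a detected port are
 * scanned; anything else indicates an inconsistency with
 * nr_controllers and is treated as a bug.
 */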
static struct pci_bus __init *
mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct pci_bus *bus;

	if (nr < num_pcie_ports) {
		bus = pci_scan_bus(sys->busnr, &pcie_ops, sys);
	} else {
		bus = NULL;
		BUG();
	}

	return bus;
}
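
/*
 * Each port has a dedicated interrupt; the IRQ number is derived from
 * the (major, minor) port pair relative to IRQ_MV78XX0_PCIE_00.
 */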
static int __init mv78xx0_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pcie_port *pp = bus_to_port(dev->bus->number);

	return IRQ_MV78XX0_PCIE_00 + (pp->maj << 2) + pp->min;
}
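
/*
 * nr_controllers is the maximum number of ports (two units of four
 * lanes each); mv78xx0_pcie_setup() returns 0 for indices beyond the
 * number of ports that came up, so those are skipped.
 */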
static struct hw_pci mv78xx0_pci __initdata = {
	.nr_controllers	= 8,
	.preinit	= mv78xx0_pcie_preinit,
	.swizzle	= pci_std_swizzle,
	.setup		= mv78xx0_pcie_setup,
	.scan		= mv78xx0_pcie_scan_bus,
	.map_irq	= mv78xx0_pcie_map_irq,
};
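
/*
 * Register a port only if its link came up; ports with the link down
 * are ignored so they never consume a pcie_port slot or an address
 * decode window.
 */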
static void __init add_pcie_port(int maj, int min, unsigned long base)
{
	printk(KERN_INFO "MV78xx0 PCIe port %d.%d: ", maj, min);

	if (orion_pcie_link_up((void __iomem *)base)) {
		struct pcie_port *pp = &pcie_port[num_pcie_ports++];

		printk("link up\n");

		pp->maj = maj;
		pp->min = min;
		pp->root_bus_nr = -1;
		pp->base = (void __iomem *)base;
		spin_lock_init(&pp->conf_lock);
		memset(pp->res, 0, sizeof(pp->res));
	} else {
		printk("link down, ignoring\n");
	}
}
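
/*
 * Probe the requested PCIe units. Port x.0 is always probed; ports
 * x.1 through x.3 only exist when the unit is strapped for quad-x1
 * operation rather than a single x4 link.
 */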
void __init mv78xx0_pcie_init(int init_port0, int init_port1)
{
	if (init_port0) {
		add_pcie_port(0, 0, PCIE00_VIRT_BASE);
		if (!orion_pcie_x4_mode((void __iomem *)PCIE00_VIRT_BASE)) {
			add_pcie_port(0, 1, PCIE01_VIRT_BASE);
			add_pcie_port(0, 2, PCIE02_VIRT_BASE);
			add_pcie_port(0, 3, PCIE03_VIRT_BASE);
		}
	}

	if (init_port1) {
		add_pcie_port(1, 0, PCIE10_VIRT_BASE);
		if (!orion_pcie_x4_mode((void __iomem *)PCIE10_VIRT_BASE)) {
			add_pcie_port(1, 1, PCIE11_VIRT_BASE);
			add_pcie_port(1, 2, PCIE12_VIRT_BASE);
			add_pcie_port(1, 3, PCIE13_VIRT_BASE);
		}
	}

	pci_common_init(&mv78xx0_pci);
}