/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/i2c/pxa-i2c.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include <mach/dma.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"
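
/*
 * External wakeup pin n (n = 0 or 1) has its interrupt enable bit at
 * bit (28 + 2 * n) and its status bit at bit (29 + 2 * n) of PECR, as
 * encoded by the two macros below.
 */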
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)
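
/*
 * Note: NDCR is accessed through the fixed NAND_VIRT mapping declared
 * in pxa3xx_io_desc[] below; pxa3xx_init() uses it directly to adjust
 * the DFI bus arbitration bits.
 */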

#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;
static unsigned long wakeup_src;
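
/*
 * Low-power entry helpers.  wakeup_src accumulates the ADxER wakeup
 * source bits selected through pxa3xx_set_wake() below; standby entry
 * programs it into AD2D0ER, suspend entry into AD3ER.
 */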

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	/* clear the D2 resume status and arm the selected wakeup sources */
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes along
 * with PXA3xx development kits assumes that the resuming process
 * continues with the address stored within the first 4 bytes of SDRAM.
 * The PSPR register is used privately by BootROM and OBM, and _must_
 * be set to 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
	u64 acc0;

	/*
	 * Without CONFIG_IWMMXT nothing else preserves the XScale
	 * accumulator across suspend, so save it by hand here.
	 */
	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	/* CKEN_HSIO2 is numbered above 31, so reduce it to its bit in CKENB */
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = virt_to_phys(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	/* restore the saved accumulator (acc0 is an input here) */
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif
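
/*
 * The two external wakeup pins (IRQ_WAKEUP0/IRQ_WAKEUP1) have extra
 * enable/status bits in PECR and edge-select bits in PWER, so they get
 * a dedicated irq_chip that handles those on top of the regular PXA
 * mask/unmask operations.
 */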
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	pxa_mask_irq(d);
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	pxa_unmask_irq(d);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	/*
	 * PWER: rising-edge detect for external wakeup pin n is bit n,
	 * falling-edge detect is bit n + 2.
	 */
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
void __init pxa3xx_dt_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
}
#endif	/* CONFIG_OF */

static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */
void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};
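
/*
 * Note: the legacy devices[] above are only added on non-DT boots;
 * pxa3xx_init() below returns early once it sees a populated device
 * tree, leaving device registration to the DT code.
 */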
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
			return ret;

		pxa3xx_init_pm();

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(32, 100);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);