/*
 *  linux/arch/arm/mach-pxa/pxa3xx.c
 *
 *  code specific to pxa3xx aka Monahans
 *
 *  Copyright (C) 2006 Marvell International Ltd.
 *
 *  2007-09-02: eric miao <eric.miao@marvell.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/i2c/pxa-i2c.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

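/*
 * PECR_IE(n)/PECR_IS(n) pick the interrupt-enable and interrupt-status bits
 * in PECR for external wakeup pin n; they back the ack/mask/unmask callbacks
 * of the ext_wakeup irq_chip further down.
 */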
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)
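/*
 * NDCR is reached through the fixed NAND_VIRT mapping from pxa3xx_io_desc
 * below, so pxa3xx_init() can adjust the DFI arbitration bits without going
 * through the NAND driver.
 */
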
#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE:  currently, the OBM (OEM Boot Module) binary that comes along with
 * the PXA3xx development kits assumes that the resuming process continues
 * with the address stored within the first 4 bytes of SDRAM. The PSPR
 * register is used privately by BootROM and OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
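	/*
	 * Nothing else preserves the XScale accumulator acc0 across the
	 * suspend when iWMMXt support is not built in (the iWMMXt context
	 * code presumably covers it otherwise), so save and restore it by
	 * hand around cpu_suspend().
	 */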
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */
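
	/*
	 * Hand BootROM/OBM the fixed handshake address required by the NOTE
	 * above.
	 */
	PSPR = 0x5c014000;
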
	/* overwrite with the resume address */
	*p = virt_to_phys(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : "=r" (acc0));
#endif
}

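/*
 * Entry point from the generic PXA PM code: S0D2C2 standby for
 * PM_SUSPEND_STANDBY, the full suspend path for PM_SUSPEND_MEM.
 */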
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

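/*
 * Map the internal SRAM used for the standby trampoline, make sure it is
 * retained across the low power modes, and hook the PXA3xx callbacks into
 * the generic PXA PM layer.
 */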
static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

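/*
 * Map a peripheral IRQ onto its ADxER wakeup-source bit and accumulate it
 * in wakeup_src, which is programmed into the wakeup enable registers on
 * standby/suspend entry.
 */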
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}

#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif

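/*
 * The two external wakeup pins (IRQ_WAKEUP0/1) have their own enable and
 * status bits in PECR; the irq_chip below lets them be requested and
 * configured like ordinary edge-triggered interrupts.
 */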
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

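/*
 * PWER bits 0/1 enable rising-edge detect on WAKEUP0/1, bits 2/3 the
 * falling-edge detect; that is what the shifts below encode.
 */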
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

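/*
 * The PXA3xx interrupt controller registers are also reachable through
 * coprocessor 6, which the low-level IRQ entry code uses, so CP6 access
 * has to be enabled in the coprocessor access register first.
 */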
static void __init __pxa3xx_init_irq(void)
{
	u32 value;

	/* enable CP6 access */
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
void __init pxa3xx_dt_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
}
#endif	/* CONFIG_OF */

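/*
 * Fixed virtual mappings for the static memory controller and the NAND
 * controller; the SMEMC code and the NDCR macro above rely on these
 * fixed virtual addresses.
 */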
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* static memory controller */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {	/* NAND flash controller */
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

void __init pxa3xx_map_io(void)
{
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

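/* on-chip devices registered here only when not booting from a device tree */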
static struct platform_device *devices[] __initdata = {
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
};

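/*
 * Core PXA3xx bring-up: post-reset register fixups, DMA and PM init,
 * syscore hooks, and legacy (non-DT) platform device registration.
 */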
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {
		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
			return ret;

		pxa3xx_init_pm();

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(32, 100);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;

		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);