arch/avr32/mach-at32ap/pm.c

/*
 * AVR32 AP Power Management
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/sysreg.h>

#include <mach/chip.h>
#include <mach/pm.h>
#include <mach/sram.h>

#include "sdramc.h"

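/*
 * TLB entry flags used when mapping the single SRAM page that holds the PM
 * code: the page is marked dirty and global, and the size and access
 * permission fields allow the code copied there to be executed.
 * (Descriptive comment; the exact field encodings are defined by the AVR32
 * TLBELO register layout.)
 */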
#define SRAM_PAGE_FLAGS	(SYSREG_BIT(TLBELO_D) | SYSREG_BF(SZ, 1) \
			 | SYSREG_BF(AP, 3) | SYSREG_BIT(G))

static unsigned long pm_sram_start;
static size_t pm_sram_size;
static struct vm_struct *pm_sram_area;

static void (*avr32_pm_enter_standby)(unsigned long sdramc_base);
static void (*avr32_pm_enter_str)(unsigned long sdramc_base);

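/*
 * The PM code (exception handlers and the low-level standby and
 * suspend-to-RAM routines) is copied into and run from SRAM (see
 * avr32_pm_init() below), since SDRAM cannot be used while the SDRAM
 * controller is in a low-power state. The helpers below create and tear
 * down a temporary TLB mapping for that SRAM copy.
 */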
/*
 * Must be called with interrupts disabled. Exceptions will be masked
 * on return (i.e. all exceptions will be "unrecoverable".)
 */
static void *avr32_pm_map_sram(void)
{
	unsigned long vaddr;
	unsigned long page_addr;
	u32 tlbehi;
	u32 mmucr;

	vaddr = (unsigned long)pm_sram_area->addr;
	page_addr = pm_sram_start & PAGE_MASK;

	/*
	 * Mask exceptions and grab the first TLB entry. We won't be
	 * needing it while sleeping.
	 */
	asm volatile("ssrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory");

	mmucr = sysreg_read(MMUCR);
	tlbehi = sysreg_read(TLBEHI);
	sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr));

	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= vaddr & PAGE_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);

	sysreg_write(TLBELO, page_addr | SRAM_PAGE_FLAGS);
	sysreg_write(TLBEHI, tlbehi);
	__builtin_tlbw();

	return (void *)(vaddr + pm_sram_start - page_addr);
}

/*
 * Must be called with interrupts disabled. Exceptions will be
 * unmasked on return.
 */
static void avr32_pm_unmap_sram(void)
{
	u32 mmucr;
	u32 tlbehi;
	u32 tlbarlo;

	/* Going to update TLB entry at index 0 */
	mmucr = sysreg_read(MMUCR);
	tlbehi = sysreg_read(TLBEHI);
	sysreg_write(MMUCR, SYSREG_BFINS(DRP, 0, mmucr));

	/* Clear the "valid" bit */
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	sysreg_write(TLBEHI, tlbehi);

	/* Mark it as "not accessed" */
	tlbarlo = sysreg_read(TLBARLO);
	sysreg_write(TLBARLO, tlbarlo | 0x80000000U);

	/* Update the TLB */
	__builtin_tlbw();

	/* Unmask exceptions */
	asm volatile("csrf %0" : : "i"(SYSREG_EM_OFFSET) : "memory");
}

static int avr32_pm_valid_state(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;

	default:
		return 0;
	}
}

static int avr32_pm_enter(suspend_state_t state)
{
	u32 lpr_saved;
	u32 evba_saved;
	void *sram;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		sram = avr32_pm_map_sram();

		/* Switch to in-sram exception handlers */
		evba_saved = sysreg_read(EVBA);
		sysreg_write(EVBA, (unsigned long)sram);

		/*
		 * Save the LPR register so that we can re-enable
		 * SDRAM Low Power mode on resume.
		 */
		lpr_saved = sdramc_readl(LPR);
		pr_debug("%s: Entering standby...\n", __func__);
		avr32_pm_enter_standby(SDRAMC_BASE);
		sdramc_writel(LPR, lpr_saved);

		/* Switch back to regular exception handlers */
		sysreg_write(EVBA, evba_saved);

		avr32_pm_unmap_sram();
		break;

	case PM_SUSPEND_MEM:
		sram = avr32_pm_map_sram();

		/* Switch to in-sram exception handlers */
		evba_saved = sysreg_read(EVBA);
		sysreg_write(EVBA, (unsigned long)sram);

		/*
		 * Save the LPR register so that we can re-enable
		 * SDRAM Low Power mode on resume.
		 */
		lpr_saved = sdramc_readl(LPR);
		pr_debug("%s: Entering suspend-to-ram...\n", __func__);
		avr32_pm_enter_str(SDRAMC_BASE);
		sdramc_writel(LPR, lpr_saved);

		/* Switch back to regular exception handlers */
		sysreg_write(EVBA, evba_saved);

		avr32_pm_unmap_sram();
		break;

	case PM_SUSPEND_ON:
		pr_debug("%s: Entering idle...\n", __func__);
		cpu_enter_idle();
		break;

	default:
		pr_debug("%s: Invalid suspend state %d\n", __func__, state);
		goto out;
	}

	pr_debug("%s: wakeup\n", __func__);

out:
	return 0;
}

static const struct platform_suspend_ops avr32_pm_ops = {
	.valid	= avr32_pm_valid_state,
	.enter	= avr32_pm_enter,
};

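/*
 * avr32_pm_ops is registered with the suspend core in avr32_pm_init(). With
 * the standard Linux PM interface, e.g. "echo standby > /sys/power/state" or
 * "echo mem > /sys/power/state", the core ends up calling avr32_pm_enter()
 * with PM_SUSPEND_STANDBY or PM_SUSPEND_MEM respectively.
 */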
static unsigned long avr32_pm_offset(void *symbol)
{
	extern u8 pm_exception[];

	return (unsigned long)symbol - (unsigned long)pm_exception;
}

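/*
 * The PM assembly code (exception handlers, the standby routine and the
 * suspend-to-RAM routine) lives between the pm_exception and pm_sram_end
 * symbols. avr32_pm_offset() returns a symbol's offset into that blob,
 * which is also its offset into the SRAM copy made below.
 */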
static int __init avr32_pm_init(void)
{
	extern u8 pm_exception[];
	extern u8 pm_irq0[];
	extern u8 pm_standby[];
	extern u8 pm_suspend_to_ram[];
	extern u8 pm_sram_end[];
	void *dst;

	/*
	 * To keep things simple, we depend on not needing more than a
	 * single page.
	 */
	pm_sram_size = avr32_pm_offset(pm_sram_end);
	if (pm_sram_size > PAGE_SIZE)
		goto err;

	pm_sram_start = sram_alloc(pm_sram_size);
	if (!pm_sram_start)
		goto err_alloc_sram;

	/* Grab a virtual area we can use later on. */
	pm_sram_area = get_vm_area(pm_sram_size, VM_IOREMAP);
	if (!pm_sram_area)
		goto err_vm_area;
	pm_sram_area->phys_addr = pm_sram_start;

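	/*
	 * Copy the PM code into SRAM. The mapping helpers require interrupts
	 * to be disabled, and the caches are flushed/invalidated so the copy
	 * is visible to instruction fetches once we jump to it.
	 */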
	local_irq_disable();
	dst = avr32_pm_map_sram();
	memcpy(dst, pm_exception, pm_sram_size);
	flush_dcache_region(dst, pm_sram_size);
	invalidate_icache_region(dst, pm_sram_size);
	avr32_pm_unmap_sram();
	local_irq_enable();

	avr32_pm_enter_standby = dst + avr32_pm_offset(pm_standby);
	avr32_pm_enter_str = dst + avr32_pm_offset(pm_suspend_to_ram);
	intc_set_suspend_handler(avr32_pm_offset(pm_irq0));

	suspend_set_ops(&avr32_pm_ops);

	printk("AVR32 AP Power Management enabled\n");

	return 0;

err_vm_area:
	sram_free(pm_sram_start, pm_sram_size);
err_alloc_sram:
err:
	pr_err("AVR32 Power Management initialization failed\n");
	return -ENOMEM;
}
arch_initcall(avr32_pm_init);