/*
 * sh7372 low-level sleep code for "Core Standby Mode"
 *
 * Copyright (C) 2011 Magnus Damm
 *
 * In "Core Standby Mode" the ARM core is off, but the L2 cache is still on.
 *
 * Based on mach-omap2/sleep34xx.S
 *
 * (C) Copyright 2007 Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004 Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

#define SMFRAM 0xe6a70000
        .word v7_flush_dcache_all @ literal pool: address of the kernel's D-cache flush routine
ENTRY(sh7372_cpu_suspend)
        stmfd sp!, {r0-r12, lr} @ save registers on stack

        mrs r5, spsr @ Save spsr
        mrc p15, 0, r4, c1, c0, 2 @ Coprocessor Access Control Register
        mrc p15, 0, r5, c2, c0, 0 @ TTBR0
        mrc p15, 0, r6, c2, c0, 1 @ TTBR1
        mrc p15, 0, r7, c2, c0, 2 @ TTBCR

        mrc p15, 0, r4, c3, c0, 0 @ Domain Access Control Register
        mrc p15, 0, r5, c10, c2, 0 @ PRRR
        mrc p15, 0, r6, c10, c2, 1 @ NMRR

        mrc p15, 0, r4, c13, c0, 1 @ Context ID
        mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
        mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
        mrs r7, cpsr @ Save current cpsr

        mrc p15, 0, r4, c1, c0, 0 @ save control register
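        /*
         * Each group of registers gathered above is spilled to the on-chip
         * SRAM save area at SMFRAM before r4-r7 are reused; a minimal
         * sketch, assuming r3 points into that area (the resume path below
         * reads the values back the same way):
         *
         *      ldr r3, =SMFRAM
         *      stmia r3!, {r4-r7}
         */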
        /*
         * Jump out to the kernel flush routine:
         *  - reusing that code is better
         *  - it executes in a cached space, so it is faster than refetching
         *    per-block
         *  - it should be faster and will change with the kernel
         *  - we 'might' have to copy the address, load it and jump to it
         * Flush all data from the L1 data cache before disabling
         * the SCTLR.C bit.
         */
        /*
         * Clear the SCTLR.C bit to prevent further data cache
         * allocation. Once SCTLR.C is cleared, all data accesses
         * are strongly ordered and no longer hit the cache.
         */
        mrc p15, 0, r0, c1, c0, 0
        bic r0, r0, #(1 << 2) @ Disable the C bit
        mcr p15, 0, r0, c1, c0, 0
        /*
         * Invalidate the L1 data cache. Even though only invalidation is
         * necessary, the exported flush API is used here: cleaning an
         * already clean cache is almost a no-op.
         */
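        /*
         * The flush routine is reached through the literal pool near the
         * top of this file; a minimal sketch of the call (the literal's
         * label name is an assumption here):
         *
         *      ldr r1, kernel_flush
         *      blx r1
         */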
        /*
         * The kernel doesn't interwork: v7_flush_dcache_all in particular will
         * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
         * This sequence switches back to ARM. Note that .align may insert a
         * nop: bx pc needs to be word-aligned in order to work.
         */
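        /*
         * A minimal sketch of such a switch sequence, using the THUMB()
         * macro from <asm/unified.h> (it emits its argument only on
         * Thumb-2 kernels):
         *
         * THUMB( .thumb  )
         * THUMB( .align  )
         * THUMB( bx pc   )
         * THUMB( nop     )
         *        .arm
         */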
        /* Data memory barrier and data synchronization barrier */

        /*
         * ===================================
         * == WFI instruction => Enter idle ==
         * ===================================
         */

        wfi @ wait for interrupt
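        /*
         * If the core was not actually powered down (for example, the WFI
         * completed because of a pending interrupt), execution falls
         * straight through to the resume path below. A full power-off
         * instead re-enters at sh7372_cpu_resume.
         */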
        /*
         * ===================================
         * == Resume path for non-OFF modes ==
         * ===================================
         */

        mrc p15, 0, r0, c1, c0, 0
        tst r0, #(1 << 2) @ test whether the C bit is set
        orreq r0, r0, #(1 << 2) @ re-enable the C bit if it was cleared
        mcreq p15, 0, r0, c1, c0, 0
        /*
         * ===================================
         * == Exit point from non-OFF modes ==
         * ===================================
         */

        ldmfd sp!, {r0-r12, pc} @ restore regs and return
        .global sh7372_cpu_resume

        /*
         * Invalidate all instruction caches to PoU
         * and flush the branch target cache
         */
        mcr p15, 0, r1, c7, c5, 0 @ ICIALLU
        mov sp, r4 @ Restore sp
        msr spsr_cxsf, r5 @ Restore spsr
        mov lr, r6 @ Restore lr
        mcr p15, 0, r4, c1, c0, 2 @ Coprocessor Access Control Register
        mcr p15, 0, r5, c2, c0, 0 @ TTBR0
        mcr p15, 0, r6, c2, c0, 1 @ TTBR1
        mcr p15, 0, r7, c2, c0, 2 @ TTBCR

        mcr p15, 0, r4, c3, c0, 0 @ Domain Access Control Register
        mcr p15, 0, r5, c10, c2, 0 @ PRRR
        mcr p15, 0, r6, c10, c2, 1 @ NMRR

        mcr p15, 0, r4, c13, c0, 1 @ Context ID
        mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
        mcr p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
        msr cpsr, r7 @ Restore cpsr
        /* Start enabling the MMU here */
        mrc p15, 0, r7, c2, c0, 2 @ Read TTBCR
        /* Extract the N[2:0] bits and decide whether to use TTBR0 or TTBR1 */

        /*
         * Supporting an N[2:0] value other than 0 needs more work,
         * so loop here so that the error can be detected.
         */
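        /*
         * A minimal sketch of the decision and of the trap loop described
         * above (the label names are assumptions):
         *
         *      and r7, r7, #0x7 @ TTBCR.N lives in bits [2:0]
         *      cmp r7, #0x0     @ N == 0: TTBR0 translates everything
         *      beq usettbr0
         * ttbr_error:
         *      b ttbr_error     @ spin so the failure is detectable
         */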
cache_pred_disable_mask:

        mrc p15, 0, r2, c2, c0, 0 @ r2 = page table base from TTBR0

        ldr r5, table_index_mask
        and r4, r5 @ r4 = bits 31:20 of the PC
        /* Extract the value to be written to the table entry */

        /* r6 has the value to be written to the table entry */

        /* Compute the address of the table entry to modify */

        /* r2 has the location which needs to be modified */

        str r6, [r2] /* modify the table entry */
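        /*
         * In outline, the sequence above builds a flat 1:1 section mapping
         * for the 1MB section this code is running in, so that turning the
         * MMU on does not pull the address space out from under the PC.
         * The first-level table holds one word per entry, indexed by
         * PC[31:20] (the section descriptor flags come from an elided
         * template):
         *
         *      entry address = (TTBR0 table base) + ((PC & 0xfff00000) >> 18)
         *      entry value   = section descriptor template + (PC & 0xfff00000)
         */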
        /* r5 = original page table address */
        /* r6 = original page table data */

        mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
        mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
        mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
        mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
        /*
         * Restore the control register; this enables the MMU.
         * The caches and prediction are not enabled here; they
         * will be enabled after restoring the MMU table entry.
         */
        stmia r3!, {r5} /* save original page table address */
        stmia r3!, {r6} /* save original page table data */
        stmia r3!, {r7} /* save modified page table data */

        ldr r2, cache_pred_disable_mask @ keep cache/prediction bits disabled
        mcr p15, 0, r4, c1, c0, 0 @ write SCTLR: MMU on, caches still off
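        /*
         * Architecturally, the SCTLR write needs a following ISB (with a
         * DSB ahead of it so the page table update has completed) before
         * the branch below, so that the MMU enable has taken effect; a
         * minimal sketch:
         *
         *      dsb
         *      isb
         */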
        ldr r0, =restoremmu_on @ virtual address to continue at once the MMU is on
        /*
         * ==============================
         * == Exit point from OFF mode ==
         * ==============================
         */

        ldmfd sp!, {r0-r12, pc} @ restore regs and return