4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/linkage.h>
13 #include <asm/assembler.h>
14 #include <asm/smp_scu.h>
15 #include <asm/memory.h>
16 #include <asm/hardware/cache-l2x0.h>
18 #include "omap-secure.h"
22 #include "omap4-sar-layout.h"
24 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
38 #ifdef CONFIG_ARCH_OMAP4
41 * =============================
42 * == CPU suspend finisher ==
43 * =============================
45 * void omap4_finish_suspend(unsigned long cpu_state)
47 * This function code saves the CPU context and performs the CPU
48 * power down sequence. Calling WFI effectively changes the CPU
49 * power domains states to the desired target power state.
51 * @cpu_state : contains context save state (r0)
53 * 1 - CPUx L1 and logic lost: MPUSS CSWR
54 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
55 * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
56 * @return: This function never returns for CPU OFF and DORMANT power states.
57 * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
58 * from this follows a full CPU reset path via ROM code to CPU restore code.
59 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
60 * It returns to the caller for CPU INACTIVE and ON power states or in case
61 * CPU failed to transition to targeted OFF/DORMANT state.
63 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
64 * stack frame and it expects the caller to take care of it. Hence the entire
65 * stack frame is saved to avoid possible stack corruption.
/*
 * NOTE(review): this extract is incomplete — several labels referenced
 * below (skip_secure_l1_clean, do_WFI, the way-clean poll loops), the
 * DO_SMC/smc invocations that consume the r12 secure-service indexes,
 * and the closing #endif directives are not visible in this chunk.
 * Comments describe only the visible instructions; confirm against the
 * complete file before assembling.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}	@ full frame: v7_flush_dcache_all saves none
	beq	do_WFI			@ No lowpower state, jump to WFI
					@ NOTE(review): the cmp of r0 (cpu_state)
					@ that sets these flags is not visible here — confirm

	/*
	 * Flush all data from the L1 data cache before disabling
	 * the data cache (SCTLR.C is cleared just below).
	 */
	bl	omap4_get_sar_ram_base	@ r0 = SAR RAM base on return
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	skip_secure_l1_clean	@ GP device: no secure-side clean needed
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF		@ clean secure L1
	stmfd	r13!, {r4-r12, r14}	@ monitor call clobbers registers; preserve
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX	@ secure service index (smc consuming it not visible here)
	ldmfd	r13!, {r4-r12, r14}
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0

	/*
	 * Invalidate L1 data cache. Even though only invalidate is
	 * necessary exported flush API is used here. Doing clean
	 * on already clean cache would be almost NOP.
	 */
	bl	v7_flush_dcache_all

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]	@ NOTE(review): expects r8 = SAR base;
						@ the mov r8, r0 after the bl is not visible here
	cmp	r9, #0x1		@ Check for HS device
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ldreq	r0, [r8, #SCU_OFFSET0]	@ CPU0: target SCU power state from SAR RAM
	ldrne	r0, [r8, #SCU_OFFSET1]	@ CPU1: target SCU power state from SAR RAM
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX	@ HS path: program SCU power status via monitor
	ldmfd	r13!, {r4-r12, r14}
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base	@ GP path: program SCU power status directly
	mrc	p15, 0, r0, c1, c1, 2	@ Read NSACR data
	mrcne	p15, 0, r0, c1, c0, 1	@ read ACTLR (only if NS SMP access granted)
	bicne	r0, r0, #(1 << 6)	@ Disable SMP bit — leave coherency domain
	mcrne	p15, 0, r0, c1, c0, 1

#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * Common cache-l2x0.c functions can't be used here since it
	 * uses spinlocks. We are out of coherency here with data cache
	 * disabled. The spinlock implementation uses exclusive load/store
	 * instruction which can fail without data cache being enabled.
	 * OMAP4 hardware doesn't support exclusive monitor which can
	 * overcome exclusive access issue. Because of this, CPU can
	 * (comment truncated in this extract)
	 */
	bl	omap4_get_sar_ram_base
	mrc	p15, 0, r5, c0, c0, 5	@ Read MPIDR
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX	@ errata 727915: toggle PL310 debug ctrl
	bl	omap4_get_l2cache_base	@ r0 = L2 controller base (moved to r2 in elided code — confirm)
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]	@ start clean & invalidate by way
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]	@ poll way bits (loop label not visible here)
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	bl	omap4_get_l2cache_base
	str	r0, [r2, #L2X0_CACHE_SYNC]	@ drain L2 write buffers
	ldr	r0, [r2, #L2X0_CACHE_SYNC]	@ poll sync completion

	/*
	 * CPU is here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0

	/*
	 * Ensure the CPU power state is set to NORMAL in
	 * SCU power state so that CPU is back in coherency.
	 * In non-coherent mode CPU can lock-up and lead to
	 * (comment truncated in this extract)
	 */
	mrc	p15, 0, r0, c1, c0, 1	@ read ACTLR
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)	@ re-enter SMP (coherent) mode
	mcreq	p15, 0, r0, c1, c0, 1
	bl	omap4_get_sar_ram_base
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	mov	r0, #SCU_PM_NORMAL	@ restore SCU power status to NORMAL
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX	@ HS path: restore via secure monitor
	ldmfd	r13!, {r4-r12, r14}
	bl	omap4_get_scu_base	@ GP path: restore SCU power status directly
	mov	r1, #SCU_PM_NORMAL
	ldmfd	sp!, {r4-r12, pc}	@ restore frame and return to caller
ENDPROC(omap4_finish_suspend)
240 * ============================
241 * == CPU resume entry point ==
242 * ============================
244 * void omap4_cpu_resume(void)
246 * ROM code jumps to this function while waking up from CPU
247 * OFF or DORMANT state. Physical address of the function is
248 * stored in the SAR RAM while entering to OFF or DORMANT mode.
249 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
/*
 * NOTE(review): this extract is incomplete — the tst/ands instructions
 * that set flags for the conditional branches, the DO_SMC/smc calls that
 * consume r12 service indexes, the skip_ns_smp_enable/skip_l2en labels,
 * and the closing #endif are not visible in this chunk.  Comments
 * describe only the visible instructions; confirm against the full file.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices- SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR — CPU0 needs no PPA call
	beq	skip_ns_smp_enable	@ NOTE(review): flag-setting test of the
					@ CPU-id bits is not visible here — confirm
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r3, ppa_zero_params	@ Pointer to parameters
	mov	r1, #0x0		@ Process ID
	mov	r12, #0x00		@ Secure Service ID (smc not visible here)
	cmp	r0, #0x0		@ API returns 0 on success.
	mrc	p15, 0, r0, c1, c0, 1	@ read ACTLR
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)	@ join the coherency domain
	mcreq	p15, 0, r0, c1, c0, 1

#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
	 * register r0 contains value to be programmed.
	 * L2 cache is already invalidated by ROM code as part
	 * of MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]	@ current L2 enable state
	beq	skip_l2en		@ Skip if already enabled
					@ NOTE(review): the ands/tst of the enable
					@ bit is not visible here — confirm
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1		@ Check for HS device
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX	@ HS: set L2 POR value via PPA service
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]	@ saved prefetch ctrl for PPA params
	adr	r3, ppa_por_params
	mov	r1, #0x0		@ Process ID
	mov	r12, #0x00		@ Secure Service ID (smc not visible here)
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX	@ Enable L2 cache
	b	cpu_resume		@ Jump to generic resume
ENDPROC(omap4_cpu_resume)
332 #endif /* CONFIG_ARCH_OMAP4 */
334 #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
338 #ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
339 /* Drain interconnect write buffers. */
340 bl omap_interconnect_sync
344 * Execute an ISB instruction to ensure that all of the
345 * CP15 register changes have been committed.
350 * Execute a barrier instruction to ensure that all cache,
351 * TLB and branch predictor maintenance operations issued
352 * by any CPU in the cluster have completed.
358 * Execute a WFI instruction and wait until the
359 * STANDBYWFI output is asserted to indicate that the
360 * CPU is in idle and low power state. CPU can speculatively
361 * prefetch the instructions so add NOPs after WFI. Sixteen
362 * NOPs as per Cortex-A9 pipeline.
364 wfi @ Wait For Interrupt