/*
 * OMAP44xx CPU low power powerdown and powerup code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include <plat/omap44xx.h>
#include <mach/omap4-common.h>

#include "omap4-sar-layout.h"
/* Masks used for MMU manipulation */
#define TTRBIT_MASK			0xffffc000
#define TABLE_INDEX_MASK		0xfff00000
#define TABLE_ENTRY			0x00000c02
#define CACHE_DISABLE_MASK		0xffffe7fb
#define TABLE_ADDRESS_OFFSET		0x04
#define CR_VALUE_OFFSET			0x08
#define SCU_POWER_SECURE_INDEX		0x108
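/*
 * Illustrative note, not part of the original file: the resume path below
 * builds a temporary 1:1 first-level section descriptor for the running code
 * as (pa & TABLE_INDEX_MASK) | TABLE_ENTRY. A minimal C sketch of that
 * computation (build_section_desc() is a hypothetical helper, assuming the
 * ARMv7 short-descriptor format with 1MB sections):
 *
 *	// e.g. build_section_desc(0x80301234) == 0x80300c02
 *	static inline unsigned long build_section_desc(unsigned long pa)
 *	{
 *		return (pa & TABLE_INDEX_MASK) | TABLE_ENTRY;
 *	}
 *
 * TABLE_ENTRY (0x00000c02) encodes a non-cacheable 1MB section with AP = 0b11,
 * and CACHE_DISABLE_MASK clears the SCTLR C, Z and I bits so the MMU can be
 * re-enabled with the caches still off.
 */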
/*
 * Macro to call a PPA service when the MMU is OFF.
 * The caller must set up r0 and r3 before calling this macro.
 * @r0: PPA service ID
 * @r3: Pointer to params
 */
.macro LM_CALL_PPA_SERVICE_PA
	mov	r1, #0x0		@ Process ID
	mov	r12, #0x00		@ Secure Service ID

/*
 * To load the POR which was saved in SAR RAM
 */
/*
 * =============================
 * == CPU suspend entry point ==
 * =============================
 *
 * void omap4_cpu_suspend(unsigned int cpu, unsigned int save_state)
 *
 * This function saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu : contains the CPU id (r0)
 * @save_state : contains the context save state (r1)
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power states.
 * Post WFI, the CPU transitions to DORMANT or OFF power state and on wake-up
 * follows a full CPU reset path via ROM code to the CPU restore code.
 * It returns to the caller for the CPU INACTIVE and ON power states or in case
 * the CPU failed to transition to the targeted OFF/DORMANT state.
 */
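/*
 * Illustrative note, not part of the original file: a hedged C-level sketch
 * of how a caller (e.g. the MPUSS low-power driver) might derive save_state
 * from the targeted power state before calling this routine. The function
 * name omap4_enter_lowpower() and the MPUSS_* constants are assumptions made
 * for this example only.
 *
 *	static void omap4_enter_lowpower(unsigned int cpu, unsigned int mpuss_state)
 *	{
 *		unsigned int save_state;
 *
 *		switch (mpuss_state) {
 *		case MPUSS_CSWR:
 *			save_state = 1;		// CPUx L1 and logic lost
 *			break;
 *		case MPUSS_OSWR:
 *			save_state = 2;		// + GIC lost
 *			break;
 *		case MPUSS_OFF:
 *			save_state = 3;		// + L2 lost
 *			break;
 *		default:
 *			save_state = 0;		// nothing to save, WFI only
 *			break;
 *		}
 *		omap4_cpu_suspend(cpu, save_state);
 *	}
 */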
ENTRY(omap4_cpu_suspend)
	stmfd	sp!, {r0-r12, lr}		@ Save registers on stack
	beq	do_WFI				@ Nothing to save, jump to WFI

	bl	omap4_get_sar_ram_base
	streq	r6, [r8, #L2X0_SAVE_OFFSET0]	@ Store save state
	strne	r6, [r8, #L2X0_SAVE_OFFSET1]
	orreq	r8, r8, #CPU0_SAVE_OFFSET
	orrne	r8, r8, #CPU1_SAVE_OFFSET

	/*
	 * Save only the needed CPU CP15 registers. VFP, breakpoint and
	 * performance monitor registers are not saved. Generic code is
	 * expected to take care of those.
	 */
	mov	r4, sp				@ Store sp
	mrs	r5, spsr			@ Store spsr
	mov	r6, lr				@ Store lr
	/* c1 and c2 registers */
	mrc	p15, 0, r4, c1, c0, 2		@ CPACR
	mrc	p15, 0, r5, c2, c0, 0		@ TTBR0
	mrc	p15, 0, r6, c2, c0, 1		@ TTBR1
	mrc	p15, 0, r7, c2, c0, 2		@ TTBCR

	/* c3 and c10 registers */
	mrc	p15, 0, r4, c3, c0, 0		@ DACR
	mrc	p15, 0, r5, c10, c2, 0		@ PRRR
	mrc	p15, 0, r6, c10, c2, 1		@ NMRR

	/* c12, c13 and CPSR registers */
	mrc	p15, 0, r4, c13, c0, 1		@ Context ID
	mrc	p15, 0, r5, c13, c0, 2		@ User r/w thread ID
	mrc	p15, 0, r6, c12, c0, 0		@ Secure or NS VBAR
	mrs	r7, cpsr			@ Store CPSR

	/* c1 control register */
	mrc	p15, 0, r4, c1, c0, 0		@ Save control register
	/*
	 * Flush all data from the L1 data cache before disabling the
	 * SCTLR.C bit.
	 */
	bl	v7_flush_dcache_all

	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_flush
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ Clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =SCU_POWER_SECURE_INDEX
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_flush:

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0

	/*
	 * Invalidate the L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. Doing a clean
	 * on an already clean cache is almost a NOP.
	 */
	bl	v7_flush_dcache_all
	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
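	/*
	 * Illustrative note, not part of the original file: on the GP path
	 * the SCU power-status update performed below is conceptually what
	 * the generic scu_power_mode() helper from <asm/smp_scu.h> does for
	 * the current CPU, roughly:
	 *
	 *	// minimal sketch; scu_base maps the SCU, mode is one of
	 *	// SCU_PM_NORMAL / SCU_PM_DORMANT / SCU_PM_POWEROFF
	 *	scu_power_mode(scu_base, mode);
	 *
	 * It is open-coded here, and routed through the secure monitor on HS
	 * devices via SCU_POWER_SECURE_INDEX, because this code runs with the
	 * data cache disabled and out of coherency.
	 */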
	bl	omap4_get_sar_ram_base
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00			@ Secure L1 is clean already
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =SCU_POWER_SECURE_INDEX
	ldmfd	r13!, {r4-r12, r14}

	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base

	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)
	mcrne	p15, 0, r0, c1, c0, 1
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since they
	 * take spinlocks. We are out of coherency here with the data cache
	 * disabled. The spinlock implementation uses exclusive load/store
	 * instructions which can fail without the data cache being enabled.
	 * OMAP4 hardware doesn't support an exclusive monitor which could
	 * overcome the exclusive access issue; because of this, the CPU
	 * could deadlock here.
	 */
	bl	omap4_get_sar_ram_base
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]

#ifdef CONFIG_PL310_ERRATA_727915
	bl	omap4_get_l2cache_base
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]

#ifdef CONFIG_PL310_ERRATA_727915
	bl	omap4_get_l2cache_base
	str	r0, [r2, #L2X0_CACHE_SYNC]
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
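	/*
	 * Illustrative note, not part of the original file: a hedged C-level
	 * sketch of the unlocked clean-and-invalidate-by-way sequence done
	 * above, assuming l2_base maps the PL310 and all 16 ways are targeted
	 * (way mask 0xffff):
	 *
	 *	writel_relaxed(0xffff, l2_base + L2X0_CLEAN_INV_WAY);
	 *	while (readl_relaxed(l2_base + L2X0_CLEAN_INV_WAY) & 0xffff)
	 *		cpu_relax();
	 *	writel_relaxed(0, l2_base + L2X0_CACHE_SYNC);
	 *	while (readl_relaxed(l2_base + L2X0_CACHE_SYNC) & 1)
	 *		cpu_relax();
	 *
	 * The regular outer-cache callbacks are avoided precisely because
	 * they take spinlocks, which rely on exclusive accesses that may
	 * fail with the data cache disabled.
	 */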
	/*
	 * The CPU reaches here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0

	/* Enable the SMP bit if it was disabled */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1

	/*
	 * Ensure the CPU power state is set to NORMAL in the
	 * SCU power state so that the CPU is back in coherency.
	 * In non-coherent mode the CPU can lock up and lead to
	 * system deadlock.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	mov	r0, #SCU_PM_NORMAL
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =SCU_POWER_SECURE_INDEX
	ldmfd	r13!, {r4-r12, r14}

	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL

	ldmfd	sp!, {r0-r12, pc}		@ Restore regs and return
ENDPROC(omap4_cpu_suspend)
/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of this function is
 * stored in SAR RAM while entering OFF or DORMANT mode.
 */
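/*
 * Illustrative note, not part of the original file: before entering OFF or
 * DORMANT, platform code is expected to write the physical address of this
 * function into the per-CPU wakeup-address slot in SAR RAM, along the lines
 * of the sketch below (the CPUx_WAKEUP_NS_PA_ADDR_OFFSET name is an
 * assumption made for this example):
 *
 *	writel_relaxed(virt_to_phys(omap4_cpu_resume),
 *		       sar_base + CPUx_WAKEUP_NS_PA_ADDR_OFFSET);
 *
 * The ROM code then branches to that physical address on wakeup.
 */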
ENTRY(omap4_cpu_resume)
	/* Each CPU gets the device type from SAR RAM and stores it in r9 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	/*
	 * CPU1 must check if CPU0 is alive/awake:
	 * if the PL310 is OFF, the MPUSS was OFF and CPU0 is still off, so
	 * CPU1 must go back to sleep and wait for CPU0.
	 * Only CPU0 should enable the cache controller.
	 */
	mrc	p15, 0, r0, c0, c0, 5		@ Get CPU ID
	ands	r0, r0, #0x0f			@ Continue boot if CPU0
	bne	is_cpu0_up			@ CPU1: must check if CPU0 is up
	cmp	r9, #0x1			@ CPU0: check for HS device in r9
	bne	gp_cp15_configure

	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	cmp	r0, #1				@ Is CPU0 already up?
	beq	cpu1_configure_cp15
	/*
	 * When CPU1 is released to the control of the HLOS in the case of
	 * OSWR and OFF mode, PPA below v1.7.3 [1] does not perform all of
	 * the memory coherency and TLB operations required.
	 *
	 * For GP devices (PPA never present) this WA path is also taken if
	 * CPU1 wakes up first; this mechanism is used to synchronize
	 * booting by shutting off CPU1, thus allowing CPU0 to boot first
	 * and restore the OMAP context.
	 *
	 * A WA to recover cleanly from this scenario is to switch CPU1 back
	 * to its previous OFF state. This forces a reset of CPU1, which in
	 * turn forces CPU1 not to override MMU descriptors already in place
	 * in internal RAM set up by CPU0. CPU1 will also sync to the in-place
	 * descriptors on the next wakeup. CPU1 wakeup is done by
	 * later kernel subsystems depending on the suspend or cpuidle path.
	 *
	 * NOTE - for OSWR the state provided is 2, and for OFF the state is 3.
	 * Since the bug impacts OFF and OSWR, we need to force a 0x3 to
	 * shut off CPU1.
	 *
	 * Since many distributions may not be able to update the PPA or would
	 * like to support platforms with an older PPA, we provide a config
	 * option. This is simpler and keeps the current code cleaner in
	 * comparison to a flag-based handling in CPU1 recovery for
	 * board + PPA revision combinations.
	 *
	 * Having this config option enabled even on platforms with a fixed
	 * PPA should not impact stability; however, the ability to make CPU1
	 * available for operations a little earlier is curtailed.
	 *
	 * [1] v1.7.3 is the official TI PPA version. A custom PPA could have
	 * the relevant changes ported over to it.
	 */
#ifdef CONFIG_OMAP4_PPA_CPU1_ONLINE_BUG
	mov	r0, #0x03		@ Target CPU1 to OFF (mpusspd=OSWR/OFF)
	mov	r1, #0x00		@ Secure L1 is already clean
	ldr	r12, =SCU_POWER_SECURE_INDEX
	isb				@ Necessary barriers before WFI
	wfi				@ Wait for interrupt

	/*
	 * If we came out of WFI immediately, something unknown happened.
	 * Fall through and loop back to the checks; failing that, retry WFI.
	 */

	/*
	 * CPU0 and CPU1 are released together from OFF mode; however,
	 * CPU0 can be busy doing restore operations while waking
	 * from OFF mode. Since many PPA services need CPU0,
	 * we ask CPU1 to loop back so that CPU1 is staggered behind CPU0.
	 */

	/*
	 * Select the right API to set up CP15, depending on the device type.
	 */
	cmp	r9, #0x1			@ Check for HS device in r9
	bne	gp_cp15_configure		@ Jump to GP API
ppa_cp15_cpu1_configure:
	/*
	 * On HS devices CPU0's CP15 is configured at wakeup by the PPA; CPU1
	 * must call the PPA to configure it, and CPU0 must be online for any
	 * PPA API to work. On 4430 devices this call also enables CPU1's
	 * access to the SMP bit; on 4460 devices, CPU1 has SMP bit access by
	 * default.
	 */
	mov	r0, #PPA_SERVICE_DEFAULT_POR_NS_SMP
	adr	r3, ppa_zero_params		@ Pointer to parameters
	LM_CALL_PPA_SERVICE_PA
	cmp	r0, #0x0			@ API returns 0 on success
	bne	ppa_cp15_cpu1_configure		@ Retry if we did not succeed
	/* HS device cp15 done, jump to continue_boot */

	/* In GP devices, both CPUs must configure their CP15 */
	/* Compute the ARM revision */
	mrc	p15, 0, r1, c0, c0, 0		@ Read main ID register
	and	r2, r1, #0x00f00000		@ Variant
	and	r3, r1, #0x0000000f		@ Revision
	orr	r3, r3, r2, lsr #20-4		@ r3 has variant and revision

#ifdef CONFIG_OMAP4_ARM_ERRATA_742230
	cmp	r3, #0x10			@ Present in r1p0 onwards
	cmp	r3, #0x22			@ Not present after r2p2
	orrle	r0, r0, #0x10			@ Set bit 4
#ifdef CONFIG_OMAP4_ARM_ERRATA_751472
	cmp	r3, #0x30			@ Present prior to r3p0
	orrlt	r0, r0, #0x800			@ Set bit 11
#ifdef CONFIG_OMAP4_ARM_ERRATA_743622
	cmp	r3, #0x20			@ Present in r2p0 onwards
	cmp	r3, #0x30			@ Not present in r3p0 onwards
	orrlt	r0, r0, #0x40			@ Set bit 6

	mov	r12, #HAL_DIAGREG_0
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * 0x109 = program the L2X0 AUXCTRL
	 * 0x102 = enable the L2 using the L2X0 CTRL register
	 * Register r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
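	/*
	 * Illustrative note, not part of the original file: 0x109 and 0x102
	 * are secure monitor service indices, so the sequence below is
	 * roughly the assembly form of (treating the omap_smc1() helper and
	 * its signature as an assumption here):
	 *
	 *	omap_smc1(0x109, saved_auxctrl);	// reprogram PL310 AUXCTRL
	 *	omap_smc1(0x102, 0x1);			// enable PL310 via its CTRL register
	 *
	 * The monitor call is needed because the PL310 control registers are
	 * only writable from the secure side on OMAP4.
	 */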
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	beq	skip_l2en			@ Skip if already enabled

	ldr	r0, =OMAP44XX_SAR_RAM_BASE	@ Check device type
	ldr	r1, [r0, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	ldr	r0, =PPA_SERVICE_PL310_POR	@ Set up PPA HAL call
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCHCTRL_OFFSET]
	LM_CALL_PPA_SERVICE_PA

	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r3, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =0x109			@ Set up L2 AUXCTRL value

	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r4, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r4, #L2X0_LOCKDOWN_OFFSET0]
	str	r9, [r2, #L2X0_LOCKDOWN_WAY_D0]
	str	r9, [r2, #L2X0_LOCKDOWN_WAY_D1]
	str	r9, [r2, #L2X0_LOCKDOWN_WAY_I0]
	str	r9, [r2, #L2X0_LOCKDOWN_WAY_I1]

	ldr	r12, =0x102			@ Enable L2 cache controller

	/* Check if we have public access to the SMP bit */
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	beq	skip_ns_smp_enable		@ Skip if still no access

	/* Set the SMP bit if it is not already set */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	/*
	 * Check the wakeup CPU id and use the appropriate
	 * SAR BANK location for the context restore.
	 */
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	mcr	p15, 0, r1, c7, c5, 0		@ Invalidate L1 I-cache
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	orreq	r3, r3, #CPU0_SAVE_OFFSET
	orrne	r3, r3, #CPU1_SAVE_OFFSET

	/* Restore cp15 registers */
	mov	sp, r4				@ Restore sp
	msr	spsr_cxsf, r5			@ Restore spsr
	mov	lr, r6				@ Restore lr

	/* c1 and c2 registers */
	mcr	p15, 0, r4, c1, c0, 2		@ CPACR
	mcr	p15, 0, r5, c2, c0, 0		@ TTBR0
	mcr	p15, 0, r6, c2, c0, 1		@ TTBR1
	mcr	p15, 0, r7, c2, c0, 2		@ TTBCR

	/* c3 and c10 registers */
	mcr	p15, 0, r4, c3, c0, 0		@ DACR
	mcr	p15, 0, r5, c10, c2, 0		@ PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ NMRR

	/* c12, c13 and CPSR registers */
	mcr	p15, 0, r4, c13, c0, 1		@ Context ID
	mcr	p15, 0, r5, c13, c0, 2		@ User r/w thread ID
	mcr	p15, 0, r6, c12, c0, 0		@ Secure or NS VBAR
	msr	cpsr, r7			@ Restore CPSR
	/*
	 * Enable the MMU here. The page table entry needs to be altered
	 * to create a temporary 1:1 map, and then the original entry is
	 * restored once the MMU is enabled.
	 */
	mrc	p15, 0, r7, c2, c0, 2		@ Read TTBR Control
	and	r7, #0x7			@ Extract N (0:2) to decide
	cmp	r7, #0x0			@ TTBR0/TTBR1
	b	ttbr_error			@ Only N = 0 supported
	mrc	p15, 0, r2, c2, c0, 0		@ Read TTBR0
	ldr	r5, =TABLE_INDEX_MASK
	and	r4, r5				@ r4 = bits 31:20 of pc
	add	r1, r1, r4			@ r1 has the value of the table entry
	lsr	r4, #18				@ Offset of the table entry
	add	r2, r4				@ r2 = location to be modified
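	/*
	 * Illustrative note, not part of the original file: with TTBCR.N == 0
	 * each 1MB of virtual space has one 4-byte first-level descriptor, so
	 * the descriptor address is the TTBR0 base + (va >> 20) * 4, which is
	 * what the shift by 18 above computes:
	 *
	 *	(va & TABLE_INDEX_MASK) >> 18 == ((va >> 20) << 2)
	 *
	 * e.g. for code at 0x80300000 the modified descriptor sits at offset
	 * 0x200c from the table base.
	 */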
	/* Ensure the modified entry makes it to main memory */
#ifdef CONFIG_CACHE_L2X0
	ldr	r5, =OMAP44XX_L2CACHE_BASE
	str	r2, [r5, #L2X0_CLEAN_INV_LINE_PA]
	ldr	r0, [r5, #L2X0_CLEAN_INV_LINE_PA]

	/* Store the previous entry of the location being modified */
	ldr	r5, =OMAP44XX_SAR_RAM_BASE
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	streq	r4, [r5, #MMU_OFFSET0]		@ Modify the table entry
	strne	r4, [r5, #MMU_OFFSET1]

	/*
	 * Store the address of the entry being modified.
	 * It will be restored after enabling the MMU.
	 */
	ldr	r5, =OMAP44XX_SAR_RAM_BASE
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	orreq	r5, r5, #MMU_OFFSET0
	orrne	r5, r5, #MMU_OFFSET1
	str	r2, [r5, #TABLE_ADDRESS_OFFSET]

	mcr	p15, 0, r0, c7, c5, 4		@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c8, c5, 0		@ Invalidate ITLB
	mcr	p15, 0, r0, c8, c6, 0		@ Invalidate DTLB

	/*
	 * Restore the control register but don't enable data caches here.
	 * Caches will be enabled after restoring the MMU table entry.
	 */
	str	r4, [r5, #CR_VALUE_OFFSET]	@ Store previous value of CR
	ldr	r2, =CACHE_DISABLE_MASK
	mcr	p15, 0, r4, c1, c0, 0
	ldr	r0, =mmu_on_label

	/* Set up the per-CPU stacks */

	/*
	 * Restore the MMU table entry that was modified for
	 * enabling the MMU.
	 */
	bl	omap4_get_sar_ram_base
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	orreq	r8, r8, #MMU_OFFSET0		@ Get the address of the entry
	orrne	r8, r8, #MMU_OFFSET1		@ that was modified
	ldr	r2, [r8, #TABLE_ADDRESS_OFFSET]
	ldr	r3, =local_va2pa_offet
	ldr	r0, [r8]			@ Get the previous value..
	str	r0, [r2]			@ ..which needs to be restored

	mcr	p15, 0, r0, c7, c1, 6		@ Flush TLB and issue barriers
	mcr	p15, 0, r0, c7, c5, 4		@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c8, c5, 0		@ Invalidate ITLB
	mcr	p15, 0, r0, c8, c6, 0		@ Invalidate DTLB

	ldr	r0, [r8, #CR_VALUE_OFFSET]	@ Restore the Control register
	mcr	p15, 0, r0, c1, c0, 0		@ with caches enabled

	ldmfd	sp!, {r0-r12, pc}		@ Restore regs and return

	.equ	local_va2pa_offet, (PLAT_PHYS_OFFSET + PAGE_OFFSET)

ENDPROC(omap4_cpu_resume)
	/* SO write to drain the MPU-to-DDR T2ASYNC FIFO */
	bl	omap_get_dram_barrier_base

	/* SO write to drain the MPU-to-L3 T2ASYNC FIFO */
	bl	omap_get_sram_barrier_base

ENDPROC(omap_bus_sync)

	/* Drain interconnect write buffers. */

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in the idle and low power state. The CPU can speculatively
	 * prefetch instructions, so add NOPs after WFI. Sixteen
	 * NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
/*
 * ============================
 * ==  ARM get revision id   ==
 * ============================
 *
 * unsigned int omap_get_arm_rev(void)
 *
 * This function returns the ARM revision id from the main ID register,
 * e.g. 0x20 for ARM r2p0, 0x21 for ARM r2p1, 0x30 for ARM r3p0.
 */
ENTRY(omap_get_arm_rev)
	mrc	p15, 0, r1, c0, c0, 0		@ Read main ID register
	and	r2, r1, #0xff000000		@ ARM?
	and	r2, r1, #0x00f00000		@ Variant
	and	r3, r1, #0x0000000f		@ Revision
	orr	r3, r3, r2, lsr #20-4		@ Combine variant and revision
ENDPROC(omap_get_arm_rev)
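/*
 * Illustrative note, not part of the original file: a hedged C equivalent of
 * the MIDR decoding above (arm_rev_from_midr() is a hypothetical helper):
 *
 *	static unsigned int arm_rev_from_midr(unsigned int midr)
 *	{
 *		return (((midr >> 20) & 0xf) << 4) | (midr & 0xf);
 *	}
 *
 * e.g. a Cortex-A9 r2p2 (variant 2, revision 2) yields 0x22, matching the
 * values used by the errata checks in gp_cp15_configure above.
 */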