ARM: cpu topology: Add debugfs interface for cpu_power
[cmplus.git] / arch / arm / mach-omap2 / omap4-mpuss-lowpower.c
bloba702b1ea75bec5a629714e2a5ae4b7c94d762fcc
1 /*
2 * OMAP4 MPUSS low power code
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
7 * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
8 * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
9 * CPU0 and CPU1 LPRM modules.
10 * CPU0, CPU1 and MPUSS each have their own power domain and
11 * hence multiple low power combinations of MPUSS are possible.
13 * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
14 * because the mode is not supported by hw constraints of dormant
15 * mode. While waking up from the dormant mode, a reset signal
16 * to the Cortex-A9 processor must be asserted by the external
17 * power controller.
19 * With architectural inputs and hardware recommendations, only
20 * below modes are supported from power gain vs latency point of view.
22 * CPU0 CPU1 MPUSS
23 * ----------------------------------------------
24 * ON ON ON
25 * ON(Inactive) OFF ON(Inactive)
26 * OFF OFF CSWR
27 * OFF OFF OSWR
28 * OFF OFF OFF
29 * ----------------------------------------------
31 * Note: CPU0 is the master core and it is the last CPU to go down
32 * and first to wake-up when MPUSS low power states are exercised
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License version 2 as
37 * published by the Free Software Foundation.
40 #include <linux/kernel.h>
41 #include <linux/io.h>
42 #include <linux/errno.h>
43 #include <linux/linkage.h>
44 #include <linux/smp.h>
46 #include <asm/cacheflush.h>
47 #include <linux/dma-mapping.h>
49 #include <asm/tlbflush.h>
50 #include <asm/smp_scu.h>
51 #include <asm/system.h>
52 #include <asm/irq.h>
53 #include <asm/hardware/gic.h>
54 #include <asm/hardware/cache-l2x0.h>
56 #include <plat/omap44xx.h>
57 #include <mach/omap4-common.h>
58 #include <mach/omap-wakeupgen.h>
59 #include <linux/clk.h>
60 #include "omap4-sar-layout.h"
61 #include "pm.h"
62 #include "prcm_mpu44xx.h"
63 #include "prminst44xx.h"
64 #include "prcm44xx.h"
65 #include "prm44xx.h"
66 #include "prm-regbits-44xx.h"
67 #include "cm.h"
68 #include "prm.h"
69 #include "cm44xx.h"
70 #include "prcm-common.h"
71 #include "clockdomain.h"
73 #ifdef CONFIG_SMP
75 #define GIC_MASK_ALL 0x0
76 #define GIC_ISR_NON_SECURE 0xffffffff
77 #define SPI_ENABLE_SET_OFFSET 0x04
78 #define PPI_PRI_OFFSET 0x1c
79 #define SPI_PRI_OFFSET 0x20
80 #define SPI_TARGET_OFFSET 0x20
81 #define SPI_CONFIG_OFFSET 0x20
83 /* GIC save SAR bank base */
84 static struct powerdomain *mpuss_pd;
86 * Maximum Secure memory storage size.
88 #define OMAP4_SECURE_RAM_STORAGE (88 * SZ_1K)
90 * Physical address of secure memory storage
92 dma_addr_t omap4_secure_ram_phys;
93 static void *secure_ram;
94 struct clk *l3_main_3_ick;
96 /* Variables to store maximum spi(Shared Peripheral Interrupts) registers. */
97 static u32 max_spi_irq, max_spi_reg;
99 struct omap4_cpu_pm_info {
100 struct powerdomain *pwrdm;
101 void __iomem *scu_sar_addr;
104 static void __iomem *gic_dist_base;
105 static void __iomem *gic_cpu_base;
106 static void __iomem *sar_base;
108 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
110 #define PPI_CONTEXT_SIZE 11
111 static DEFINE_PER_CPU(u32[PPI_CONTEXT_SIZE], gic_ppi_context);
112 static DEFINE_PER_CPU(u32, gic_ppi_enable_mask);
114 /* Helper functions */
115 static inline void sar_writel(u32 val, u32 offset, u8 idx)
117 __raw_writel(val, sar_base + offset + 4 * idx);
120 static inline u32 gic_readl(u32 offset, u8 idx)
122 return __raw_readl(gic_dist_base + offset + 4 * idx);
125 u32 gic_cpu_read(u32 reg)
127 return __raw_readl(gic_cpu_base + reg);
131 * Set the CPUx powerdomain's previous power state
133 static inline void set_cpu_next_pwrst(unsigned int cpu_id,
134 unsigned int power_state)
136 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
138 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
142 * Read CPU's previous power state
144 static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
146 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
148 return pwrdm_read_prev_pwrst(pm_info->pwrdm);
152 * Clear the CPUx powerdomain's previous power state
154 static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
156 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
158 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
161 struct reg_tuple {
162 void __iomem *addr;
163 u32 val;
166 static struct reg_tuple tesla_reg[] = {
167 {.addr = OMAP4430_CM_TESLA_CLKSTCTRL},
168 {.addr = OMAP4430_CM_TESLA_TESLA_CLKCTRL},
169 {.addr = OMAP4430_PM_TESLA_PWRSTCTRL},
172 static struct reg_tuple ivahd_reg[] = {
173 {.addr = OMAP4430_CM_IVAHD_CLKSTCTRL},
174 {.addr = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL},
175 {.addr = OMAP4430_CM_IVAHD_SL2_CLKCTRL},
176 {.addr = OMAP4430_PM_IVAHD_PWRSTCTRL}
179 static struct reg_tuple l3instr_reg[] = {
180 {.addr = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL},
181 {.addr = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL},
182 {.addr = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL},
186 * Store the SCU power status value to scratchpad memory
188 static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
190 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
191 u32 scu_pwr_st;
193 switch (cpu_state) {
194 case PWRDM_POWER_RET:
195 scu_pwr_st = SCU_PM_DORMANT;
196 break;
197 case PWRDM_POWER_OFF:
198 scu_pwr_st = SCU_PM_POWEROFF;
199 break;
200 case PWRDM_POWER_ON:
201 case PWRDM_POWER_INACTIVE:
202 default:
203 scu_pwr_st = SCU_PM_NORMAL;
204 break;
207 __raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
210 static void gic_save_ppi(void)
212 void __iomem *gic_dist_base = omap4_get_gic_dist_base();
213 u32 *context = __get_cpu_var(gic_ppi_context);
214 int i = 0;
216 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI);
217 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x4);
218 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x8);
219 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0xc);
220 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x10);
221 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x14);
222 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x18);
223 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_PRI + 0x1c);
224 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG);
225 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_CONFIG + 0x4);
226 context[i++] = readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET);
228 BUG_ON(i != PPI_CONTEXT_SIZE);
231 static void gic_restore_ppi(void)
233 void __iomem *gic_dist_base = omap4_get_gic_dist_base();
234 u32 *context = __get_cpu_var(gic_ppi_context);
235 int i = 0;
237 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI);
238 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x4);
239 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x8);
240 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0xc);
241 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x10);
242 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x14);
243 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x18);
244 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_PRI + 0x1c);
245 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG);
246 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_CONFIG + 0x4);
247 writel_relaxed(context[i++], gic_dist_base + GIC_DIST_ENABLE_SET);
249 BUG_ON(i != PPI_CONTEXT_SIZE);
253 * Mask all the PPIs. This should only be called after they have been saved
254 * through secure trap or through save_ppi(). This is primarily needed to
255 * mask the local timer irq that could be pending since timekeeping gets
256 * suspended after the local irqs are disabled. The pending interrupt would
257 * kick the CPU out of WFI immediately, and prevent it from going to the lower
258 * power states. The correct value will be restored when the CPU is brought
259 * back up by restore.
261 static void gic_mask_ppi(void)
263 void __iomem *gic_dist_base = omap4_get_gic_dist_base();
265 __get_cpu_var(gic_ppi_enable_mask) =
266 readl_relaxed(gic_dist_base + GIC_DIST_ENABLE_SET);
267 writel_relaxed(0xffffffff, gic_dist_base + GIC_DIST_ENABLE_CLEAR);
270 static void gic_unmask_ppi(void)
272 void __iomem *gic_dist_base = omap4_get_gic_dist_base();
273 writel_relaxed(__get_cpu_var(gic_ppi_enable_mask),
274 gic_dist_base + GIC_DIST_ENABLE_SET);
278 * Save GIC context in SAR RAM. Restore is done by ROM code
279 * GIC is lost only when MPU hits OSWR or OFF. It consists
280 * of a distributor and a per-CPU interface module. The GIC
281 * save restore is optimised to save only necessary registers.
283 static void gic_save_context(void)
285 u8 i;
286 u32 val;
289 * Interrupt Clear Enable registers are inverse of set enable
290 * and hence not needed to be saved. ROM code programs it
291 * based on Set Enable register values.
294 /* Save CPU 0 Interrupt Set Enable register */
295 val = gic_readl(GIC_DIST_ENABLE_SET, 0);
296 sar_writel(val, ICDISER_CPU0_OFFSET, 0);
298 /* Disable interrupts on CPU1 */
299 sar_writel(GIC_MASK_ALL, ICDISER_CPU1_OFFSET, 0);
301 /* Save all SPI Set Enable register */
302 for (i = 0; i < max_spi_reg; i++) {
303 val = gic_readl(GIC_DIST_ENABLE_SET + SPI_ENABLE_SET_OFFSET, i);
304 sar_writel(val, ICDISER_SPI_OFFSET, i);
308 * Interrupt Priority Registers
309 * Secure sw accesses, last 5 bits of the 8 bits (bit[7:3] are used)
310 * Non-Secure sw accesses, last 4 bits (i.e. bits[7:4] are used)
311 * But the Secure Bits[7:3] are shifted by 1 in Non-Secure access.
312 * Secure (bits[7:3] << 1)== Non Secure bits[7:4]
313 * Hence right shift the value by 1 while saving the priority
316 /* Save SGI priority registers (Software Generated Interrupt) */
317 for (i = 0; i < 4; i++) {
318 val = gic_readl(GIC_DIST_PRI, i);
320 /* Save the priority bits of the Interrupts */
321 sar_writel(val >> 0x1, ICDIPR_SFI_CPU0_OFFSET, i);
323 /* Disable the interrupts on CPU1 */
324 sar_writel(GIC_MASK_ALL, ICDIPR_SFI_CPU1_OFFSET, i);
327 /* Save PPI priority registers (Private Peripheral Intterupts) */
328 val = gic_readl(GIC_DIST_PRI + PPI_PRI_OFFSET, 0);
329 sar_writel(val >> 0x1, ICDIPR_PPI_CPU0_OFFSET, 0);
330 sar_writel(GIC_MASK_ALL, ICDIPR_PPI_CPU1_OFFSET, 0);
332 /* SPI priority registers - 4 interrupts/register */
333 for (i = 0; i < (max_spi_irq / 4); i++) {
334 val = gic_readl((GIC_DIST_PRI + SPI_PRI_OFFSET), i);
335 sar_writel(val >> 0x1, ICDIPR_SPI_OFFSET, i);
338 /* SPI Interrupt Target registers - 4 interrupts/register */
339 for (i = 0; i < (max_spi_irq / 4); i++) {
340 val = gic_readl((GIC_DIST_TARGET + SPI_TARGET_OFFSET), i);
341 sar_writel(val, ICDIPTR_SPI_OFFSET, i);
344 /* SPI Interrupt Congigeration eegisters- 16 interrupts/register */
345 for (i = 0; i < (max_spi_irq / 16); i++) {
346 val = gic_readl((GIC_DIST_CONFIG + SPI_CONFIG_OFFSET), i);
347 sar_writel(val, ICDICFR_OFFSET, i);
350 /* Set the Backup Bit Mask status for GIC */
351 val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
352 val |= (SAR_BACKUP_STATUS_GIC_CPU0 | SAR_BACKUP_STATUS_GIC_CPU1);
353 __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
356 * API to save GIC and Wakeupgen using secure API
357 * for HS/EMU device
359 static void save_gic_wakeupgen_secure(void)
361 u32 ret;
362 ret = omap4_secure_dispatcher(HAL_SAVEGIC_INDEX,
363 FLAG_START_CRITICAL,
364 0, 0, 0, 0, 0);
365 if (!ret)
366 pr_debug("GIC and Wakeupgen context save failed\n");
371 * API to save Secure RAM, GIC, WakeupGen Registers using secure API
372 * for HS/EMU device
374 static void save_secure_all(void)
376 u32 ret;
377 ret = omap4_secure_dispatcher(HAL_SAVEALL_INDEX,
378 FLAG_START_CRITICAL,
379 1, omap4_secure_ram_phys, 0, 0, 0);
380 if (ret)
381 pr_debug("Secure all context save failed\n");
385 * API to save Secure RAM using secure API
386 * for HS/EMU device
388 static void save_secure_ram(void)
390 u32 ret;
391 ret = omap4_secure_dispatcher(HAL_SAVESECURERAM_INDEX,
392 FLAG_START_CRITICAL,
393 1, omap4_secure_ram_phys, 0, 0, 0);
394 if (!ret)
395 pr_debug("Secure ram context save failed\n");
398 /* Helper functions for MPUSS OSWR */
399 static inline u32 mpuss_read_prev_logic_pwrst(void)
401 u32 reg;
403 reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
404 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
405 reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
406 return reg;
409 static inline void mpuss_clear_prev_logic_pwrst(void)
411 u32 reg;
413 reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
414 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
415 omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
416 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
419 static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
421 u32 reg;
423 if (cpu_id) {
424 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
425 OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
426 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
427 OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
428 } else {
429 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
430 OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
431 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
432 OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
436 static inline void save_ivahd_tesla_regs(void)
438 int i;
440 for (i = 0; i < ARRAY_SIZE(tesla_reg); i++)
441 tesla_reg[i].val = __raw_readl(tesla_reg[i].addr);
443 for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++)
444 ivahd_reg[i].val = __raw_readl(ivahd_reg[i].addr);
447 static inline void restore_ivahd_tesla_regs(void)
449 int i;
451 for (i = 0; i < ARRAY_SIZE(tesla_reg); i++)
452 __raw_writel(tesla_reg[i].val, tesla_reg[i].addr);
454 for (i = 0; i < ARRAY_SIZE(ivahd_reg); i++)
455 __raw_writel(ivahd_reg[i].val, ivahd_reg[i].addr);
458 static inline void save_l3instr_regs(void)
460 int i;
462 for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++)
463 l3instr_reg[i].val = __raw_readl(l3instr_reg[i].addr);
466 static inline void restore_l3instr_regs(void)
468 int i;
470 for (i = 0; i < ARRAY_SIZE(l3instr_reg); i++)
471 __raw_writel(l3instr_reg[i].val, l3instr_reg[i].addr);
475 * OMAP4 MPUSS Low Power Entry Function
477 * The purpose of this function is to manage low power programming
478 * of OMAP4 MPUSS subsystem
479 * Paramenters:
480 * cpu : CPU ID
481 * power_state: Targetted Low power state.
483 * MPUSS Low power states
484 * The basic rule is that the MPUSS power domain must be at the higher or
485 * equal power state (state that consume more power) than the higher of the
486 * two CPUs. For example, it is illegal for system power to be OFF, while
487 * the power of one or both of the CPU is DORMANT. When an illegal state is
488 * entered, then the hardware behavior is unpredictable.
490 * MPUSS state for the context save
491 * save_state =
492 * 0 - Nothing lost and no need to save: MPUSS INACTIVE
493 * 1 - CPUx L1 and logic lost: MPUSS CSWR
494 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
495 * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
497 int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
499 unsigned int save_state = 0;
500 unsigned int wakeup_cpu;
501 unsigned int inst_clk_enab = 0;
503 if ((cpu >= NR_CPUS) || (omap_rev() == OMAP4430_REV_ES1_0))
504 goto ret;
506 switch (power_state) {
507 case PWRDM_POWER_ON:
508 case PWRDM_POWER_INACTIVE:
509 save_state = 0;
510 break;
511 case PWRDM_POWER_OFF:
512 save_state = 1;
513 break;
514 case PWRDM_POWER_RET:
515 default:
517 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
518 * doesn't make much scense, since logic is lost and $L1
519 * needs to be cleaned because of coherency. This makes
520 * CPUx OSWR equivalent to CPUX OFF and hence not supported
522 WARN_ON(1);
523 goto ret;
527 * MPUSS book keeping should be executed by master
528 * CPU only which is also the last CPU to go down.
530 if (cpu)
531 goto cpu_prepare;
533 pwrdm_pre_transition();
536 * Check MPUSS next state and save GIC if needed
537 * GIC lost during MPU OFF and OSWR
539 pwrdm_clear_all_prev_pwrst(mpuss_pd);
540 mpuss_clear_prev_logic_pwrst();
541 if (omap4_device_next_state_off()) {
542 if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
543 omap_wakeupgen_save();
544 gic_save_context();
545 } else {
546 /* FIXME: Check if this can be optimised */
547 /* l3_main inst clock must be enabled for
548 * a save ram operation
550 if (!l3_main_3_ick->usecount) {
551 inst_clk_enab = 1;
552 clk_enable(l3_main_3_ick);
554 save_secure_all();
556 if (inst_clk_enab == 1)
557 clk_disable(l3_main_3_ick);
558 save_ivahd_tesla_regs();
559 save_l3instr_regs();
561 save_state = 3;
562 goto cpu_prepare;
565 switch (pwrdm_read_next_pwrst(mpuss_pd)) {
566 case PWRDM_POWER_RET:
568 * MPUSS OSWR - Complete logic lost + L2$ retained.
569 * MPUSS CSWR - Complete logic retained + L2$ retained.
571 if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) {
572 if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
573 omap_wakeupgen_save();
574 gic_save_context();
575 } else {
576 save_gic_wakeupgen_secure();
577 save_ivahd_tesla_regs();
578 save_l3instr_regs();
580 save_state = 2;
582 break;
583 case PWRDM_POWER_OFF:
584 /* MPUSS OFF - logic lost + L2$ lost */
585 if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
586 omap_wakeupgen_save();
587 gic_save_context();
588 } else {
589 /* l3_main inst clock must be enabled for
590 * a save ram operation
592 if (!l3_main_3_ick->usecount) {
593 inst_clk_enab = 1;
594 clk_enable(l3_main_3_ick);
596 save_gic_wakeupgen_secure();
597 save_ivahd_tesla_regs();
598 save_l3instr_regs();
599 save_secure_ram();
600 if (inst_clk_enab == 1)
601 clk_disable(l3_main_3_ick);
603 save_state = 3;
604 break;
605 case PWRDM_POWER_ON:
606 case PWRDM_POWER_INACTIVE:
607 /* No need to save MPUSS context */
608 default:
612 cpu_prepare:
613 if (cpu)
614 gic_save_ppi();
617 * mask all PPIs to prevent them from kicking us out of wfi.
619 gic_mask_ppi();
621 clear_cpu_prev_pwrst(cpu);
622 cpu_clear_prev_logic_pwrst(cpu);
623 set_cpu_next_pwrst(cpu, power_state);
624 scu_pwrst_prepare(cpu, power_state);
627 * Call low level function with targeted CPU id
628 * and its low power state.
630 stop_critical_timings();
631 omap4_cpu_suspend(cpu, save_state);
632 start_critical_timings();
635 * Restore the CPUx power state to ON otherwise CPUx
636 * power domain can transitions to programmed low power
637 * state while doing WFI outside the low powe code. On
638 * secure devices, CPUx does WFI which can result in
639 * domain transition
641 wakeup_cpu = hard_smp_processor_id();
642 set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
645 * If we didn't actually get into the low power state (e.g. immediately
646 * exited wfi due to a pending interrupt), the secure side
647 * would not have restored CPU0's GIC PPI enable mask.
648 * For other CPUs, gic_restore_ppi will do that for us.
650 if (cpu)
651 gic_restore_ppi();
652 else
653 gic_unmask_ppi();
656 * If !master cpu return to hotplug-path.
658 * GIC distributor control register has changed between
659 * CortexA9 r1pX and r2pX. The Control Register secure
660 * banked version is now composed of 2 bits:
661 * bit 0 == Secure Enable
662 * bit 1 == Non-Secure Enable
663 * The Non-Secure banked register has not changed
664 * Because the ROM Code is based on the r1pX GIC, the CPU1
665 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
666 * The workaround must be:
667 * 1) Before doing the CPU1 wakeup, CPU0 must disable
668 * the GIC distributor
669 * 2) CPU1 must re-enable the GIC distributor on
670 * it's wakeup path.
672 if (wakeup_cpu) {
673 if (!cpu_is_omap443x())
674 gic_dist_enable();
675 goto ret;
678 /* Check if MPUSS lost it's logic */
679 if (mpuss_read_prev_logic_pwrst()) {
680 /* Clear SAR BACKUP status on GP devices */
681 if (omap_type() == OMAP2_DEVICE_TYPE_GP)
682 __raw_writel(0x0, sar_base + SAR_BACKUP_STATUS_OFFSET);
683 /* Enable GIC distributor and interface on CPU0*/
684 gic_cpu_enable();
685 gic_dist_enable();
687 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
689 * Dummy dispatcher call after OSWR and OFF
690 * Restore the right return Kernel address (with MMU on) for
691 * subsequent calls to secure ROM. Otherwise the return address
692 * will be to a PA return address and the system will hang.
694 omap4_secure_dispatcher(PPA_SERVICE_0,
695 FLAG_START_CRITICAL,
696 0, 0, 0, 0, 0);
698 /* Due to ROM BUG at wake up from MPU OSWR/OFF
699 * on HS/EMU device only (not GP device),
700 * the ROM Code reconfigures some of
701 * IVAHD/TESLA/L3INSTR registers.
702 * So these IVAHD/TESLA and L3INSTR registers
703 * need to be restored.*/
704 restore_ivahd_tesla_regs();
705 restore_l3instr_regs();
709 pwrdm_post_transition();
711 ret:
712 return 0;
715 static void save_l2x0_auxctrl(void)
717 #ifdef CONFIG_CACHE_L2X0
719 * Save the L2X0 AUXCTRL value to SAR memory. Its used to
720 * in every restore patch MPUSS OFF path.
722 void __iomem *l2x0_base = omap4_get_l2cache_base();
723 u32 val;
725 val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
726 __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
729 * Save the L2X0 PREFETCH_CTRL value to SAR memory.
730 * Its used in every restore path MPUSS OFF path.
733 val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
734 __raw_writel(val, sar_base + L2X0_PREFETCHCTRL_OFFSET);
736 /* Save L2X0 LOCKDOWN_OFFSET0 during SAR */
737 val = readl_relaxed(l2x0_base + 0x900);
738 writel_relaxed(val, sar_base + L2X0_LOCKDOWN_OFFSET0);
739 #endif
743 * Initialise OMAP4 MPUSS
745 int __init omap4_mpuss_init(void)
747 struct omap4_cpu_pm_info *pm_info;
748 u8 i;
750 /* Get GIC and SAR RAM base addresses */
751 sar_base = omap4_get_sar_ram_base();
752 gic_dist_base = omap4_get_gic_dist_base();
753 gic_cpu_base = omap4_get_gic_cpu_base();
755 if (omap_rev() == OMAP4430_REV_ES1_0) {
756 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
757 return -ENODEV;
760 /* Initilaise per CPU PM information */
761 pm_info = &per_cpu(omap4_pm_info, 0x0);
762 pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
763 pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
764 if (!pm_info->pwrdm) {
765 pr_err("Lookup failed for CPU0 pwrdm\n");
766 return -ENODEV;
769 /* Clear CPU previous power domain state */
770 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
771 cpu_clear_prev_logic_pwrst(0);
773 /* Initialise CPU0 power domain state to ON */
774 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
776 pm_info = &per_cpu(omap4_pm_info, 0x1);
777 pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
778 pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
779 if (!pm_info->pwrdm) {
780 pr_err("Lookup failed for CPU1 pwrdm\n");
781 return -ENODEV;
785 * Check the OMAP type and store it to scratchpad
787 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
788 /* Memory not released */
789 secure_ram = dma_alloc_coherent(NULL, OMAP4_SECURE_RAM_STORAGE,
790 (dma_addr_t *)&omap4_secure_ram_phys, GFP_ATOMIC);
791 if (!secure_ram)
792 pr_err("Unable to allocate secure ram storage\n");
793 writel(0x1, sar_base + OMAP_TYPE_OFFSET);
794 } else {
795 writel(0x0, sar_base + OMAP_TYPE_OFFSET);
798 /* Clear CPU previous power domain state */
799 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
800 cpu_clear_prev_logic_pwrst(1);
802 /* Initialise CPU1 power domain state to ON */
803 pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
806 * Program the wakeup routine address for the CPU0 and CPU1
807 * used for OFF or DORMANT wakeup. Wakeup routine address
808 * is fixed so programit in init itself.
810 __raw_writel(virt_to_phys(omap4_cpu_resume),
811 sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
812 __raw_writel(virt_to_phys(omap4_cpu_resume),
813 sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET);
815 mpuss_pd = pwrdm_lookup("mpu_pwrdm");
816 if (!mpuss_pd) {
817 pr_err("Failed to get lookup for MPUSS pwrdm\n");
818 return -ENODEV;
821 l3_main_3_ick = clk_get(NULL, "l3_main_3_ick");
823 /* Clear CPU previous power domain state */
824 pwrdm_clear_all_prev_pwrst(mpuss_pd);
825 mpuss_clear_prev_logic_pwrst();
828 * Find out how many interrupts are supported.
829 * OMAP4 supports max of 128 SPIs where as GIC can support
830 * up to 1020 interrupt sources. On OMAP4, maximum SPIs are
831 * fused in DIST_CTR bit-fields as 128. Hence the code is safe
832 * from reserved register writes since its well within 1020.
834 max_spi_reg = __raw_readl(gic_dist_base + GIC_DIST_CTR) & 0x1f;
835 max_spi_irq = max_spi_reg * 32;
838 * Mark the PPI and SPI interrupts as non-secure.
839 * program the SAR locations for interrupt security registers to
840 * reflect the same.
842 if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
843 sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU0_OFFSET, 0);
844 sar_writel(GIC_ISR_NON_SECURE, ICDISR_CPU1_OFFSET, 0);
845 for (i = 0; i < max_spi_reg; i++)
846 sar_writel(GIC_ISR_NON_SECURE, ICDISR_SPI_OFFSET, i);
848 save_l2x0_auxctrl();
850 return 0;
853 #endif