// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 */
#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/omap-gpmc.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"
/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
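
/*
 * Illustrative sketch (not part of the original file): each powerdomain
 * registered by pwrdms_setup() below gets one struct power_state entry on
 * pwrst_list, so PM code can walk the list to inspect or adjust per-domain
 * target states, e.g.:
 *
 *	struct power_state *pwrst;
 *
 *	list_for_each_entry(pwrst, &pwrst_list, node)
 *		pr_debug("%s: next state %d\n",
 *			 pwrst->pwrdm->name, pwrst->next_state);
 */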
static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
			 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context, padconf already saved above */
	omap3_control_save_context();
}
static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
}
/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during the boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
					    OMAP3_SAVE_SECURE_RAM_SZ);
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* Following is for error tracking, it should not happen */
		if (ret)
			pr_err("save_secure_sram() returns %08x\n", ret);
	}
}
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
	 * these are handled in a separate handler to avoid acking
	 * IO events before parsing in mux code.
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}
void omap_sram_idle(void)
{
	/*
	 * Variable to tell what needs to be saved and restored
	 * in omap_sram_idle:
	 * save_state = 0 => Nothing to save and restore
	 * save_state = 1 => Only L1 and logic lost
	 * save_state = 2 => Only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	u32 sdrc_pwr = 0;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_enter();

	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * gets saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In the off-mode resume path above, omap3_core_restore_context
		 * also handles the INTC autoidle restore done here, so limit
		 * this to non-off-mode resume paths so we don't do it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_exit();
}
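
/*
 * Usage sketch (illustrative only, not from the original file): callers
 * such as the cpuidle back-end or omap3_pm_suspend() below first program
 * the desired next power states and then invoke omap_sram_idle() to
 * perform the transition, e.g.:
 *
 *	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
 *	omap_sram_idle();
 */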
static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	trace_cpu_idle_rcuidle(1, smp_processor_id());
	omap_sram_idle();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_SUSPEND
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */
static void __init prcm_setup_regs(void)
{
	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
		    pwrst->pwrdm == core_pwrdm &&
		    state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}
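
/*
 * Usage sketch (illustrative only, not from the original file): callers
 * enable off-mode for all tracked powerdomains with
 *
 *	omap3_pm_off_mode_enable(1);
 *
 * and fall back to retention with omap3_pm_off_mode_enable(0); the i583
 * erratum check above keeps CORE out of OFF on affected OMAP3630 revisions.
 */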
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}
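
/*
 * Usage sketch (illustrative only, not from the original file): platform
 * code that wants a specific domain to stay in retention during suspend
 * could do, for example:
 *
 *	struct powerdomain *per = pwrdm_lookup("per_pwrdm");
 *
 *	if (per)
 *		omap3_pm_set_suspend_state(per, PWRDM_POWER_RET);
 */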
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}
/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}
static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the L2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}
int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
			  _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
			  _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
			  omap3_pm_init);
	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;

	/*
	 * RTA is disabled during initialization as per erratum i608;
	 * it is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR while the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
	 * complete workaround. The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF. And if the PER last
	 * power state was OFF while the CORE last power state was ON, the
	 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until the PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		omap3_save_secure_ram_context();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}