/*
 * OMAP4 Power Management Routines
 *
 * Copyright (C) 2010-2011 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>

#include "common.h"
#include "clockdomain.h"
#include "powerdomain.h"
#include "pm.h"
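
/*
 * Per-powerdomain bookkeeping used across suspend/resume: the target
 * low-power state to program for suspend, plus the previously programmed
 * states so they can be restored afterwards. Entries are allocated in
 * pwrdms_setup() and linked on pwrst_list.
 */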
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
	u32 saved_logic_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

#ifdef CONFIG_SUSPEND
static int omap4_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;
	u32 cpu_id = smp_processor_id();

	/* Save current powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
		pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
	}

	/* Set targeted power domain states by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
	}

	/*
	 * For MPUSS to hit power domain retention (CSWR or OSWR), the
	 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
	 * since CPU power domain CSWR is not supported by hardware.
	 * Only the master CPU follows the suspend path; all other CPUs
	 * follow the CPU hotplug path in system-wide suspend.
	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
	 */
	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
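
	/*
	 * omap4_enter_lowpower() returns once the system has resumed; the
	 * loop below uses the previous-power-state registers to verify
	 * whether each powerdomain actually reached its target state.
	 */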
	/* Restore next powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
	}
	if (ret)
		pr_crit("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return 0;
}

static int omap4_pm_enter(suspend_state_t suspend_state)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap4_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int omap4_pm_begin(suspend_state_t state)
{
	disable_hlt();
	return 0;
}

static void omap4_pm_end(void)
{
	enable_hlt();
}
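
/*
 * Suspend callbacks registered with the suspend core via
 * suspend_set_ops() in omap4_pm_init() below.
 */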
static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap4_pm_begin,
	.end		= omap4_pm_end,
	.enter		= omap4_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */

/*
 * Enable hardware supervised mode for all clockdomains if it's
 * supported. Initiate sleep transition for other clockdomains, if
 * they are not being used.
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		clkdm_sleep(clkdm);

	return 0;
}
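
/*
 * Build the pwrst_list consumed by omap4_pm_suspend(): one entry per
 * managed powerdomain, with retention as its suspend target state.
 */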
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	/*
	 * Skip CPU0 and CPU1 power domains. CPU1 is programmed
	 * through the hotplug path and CPU0 is explicitly programmed
	 * further down in the code path.
	 */
	if (!strncmp(pwrdm->name, "cpu", 3))
		return 0;

	/*
	 * FIXME: Remove this check when core retention is supported.
	 * Only the MPUSS power domain is added to the list.
	 */
	if (strcmp(pwrdm->name, "mpu_pwrdm"))
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/**
 * omap_default_idle - OMAP4 default idle routine
 *
 * Implements the OMAP4 memory and IO ordering requirements which can't be
 * addressed with the default arch_idle() hook. Used by all CPUs with
 * !CONFIG_CPUIDLE and by the secondary CPU with CONFIG_CPUIDLE.
 */
static void omap_default_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	omap_do_wfi();

	local_fiq_enable();
	local_irq_enable();
}

/**
 * omap4_pm_init - Init routine for OMAP4 PM
 *
 * Initializes all powerdomain and clockdomain target states
 * and all PRCM settings.
 */
static int __init omap4_pm_init(void)
{
	int ret;
	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;

	if (!cpu_is_omap44xx())
		return -ENODEV;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	pr_info("Power Management for TI OMAP4.\n");

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		return ret;
	}

	/*
	 * The dynamic dependency between MPUSS -> MEMIF and
	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
	 * expected. The hardware recommendation is to enable static
	 * dependencies for these to avoid system lock ups or random crashes.
	 */
	mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
	emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
	l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
	l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
	ducati_clkdm = clkdm_lookup("ducati_clkdm");
	if (!mpuss_clkdm || !emif_clkdm || !l3_1_clkdm ||
	    !l3_2_clkdm || !ducati_clkdm || !l4_per_clkdm)
		return -ENODEV;

	ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
	ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
	ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
	if (ret) {
		pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 wakeup dependency\n");
		return ret;
	}

	ret = omap4_mpuss_init();
	if (ret) {
		pr_err("Failed to initialise OMAP4 MPUSS\n");
		return ret;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	/* Overwrite the default arch_idle() */
	pm_idle = omap_default_idle;

	return 0;
}
late_initcall(omap4_pm_init);