/*
 * OMAP4-specific DPLL control functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/common.h>

#include <mach/emif.h>
#include <mach/omap4-common.h>

#include "clock.h"
#include "clock44xx.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "prcm44xx.h"
#include "cminst44xx.h"
#include "clockdomain.h"
#include "cm-regbits-44xx.h"
#define MAX_FREQ_UPDATE_TIMEOUT		100000

static struct clockdomain *l3_emif_clkdm;
static DEFINE_SPINLOCK(l3_emif_lock);
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2
 * divider. The M2 divider is used to clock external DDR, and its
 * reconfiguration on frequency change is managed through a
 * hardware sequencer driven by the PRCM together with the EMIF,
 * using shadow registers.
 * Returns -EINVAL/-1 on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
	unsigned long flags;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Cache the clockdomain to avoid a look-up on every call */
	if (!l3_emif_clkdm) {
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
		if (!l3_emif_clkdm) {
			pr_err("%s: clockdomain lookup failed\n", __func__);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&l3_emif_lock, flags);

	/* Configure the MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);
	/*
	 * Errata ID: i728
	 *
	 * If, during a small window, the following three events occur:
	 *
	 * 1) the EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter
	 *    expires,
	 * 2) a frequency change update is requested (CM_SHADOW_FREQ_CONFIG1
	 *    FREQ_UPDATE set to 1), and
	 * 3) an OCP access is requested,
	 *
	 * there will be clock instability on the DDR interface.
	 *
	 * Workaround: prevent event 1) while event 2) is happening.
	 *
	 * Disable self-refresh when requesting a frequency change:
	 * before requesting the change, program
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
	 * (omap_emif_frequency_pre_notify).
	 *
	 * When the frequency change has completed, reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 * (omap_emif_frequency_post_notify).
	 */
	omap_emif_frequency_pre_notify();
	/*
	 * Program the EMIF timing parameters in the EMIF shadow registers
	 * for the targeted DDR clock:
	 * DDR clock = core_dpll_m2 / 2
	 */
	omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);
	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *   after CORE DPLL lock)
	 * - DLL_RESET=1 (DLL must be reset upon frequency change)
	 * - DPLL_CORE_M2_DIV with the same value as the one already
	 *   in the direct register
	 * - DPLL_CORE_DPLL_EN=0x7 (to make the CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start the HW sequence)
	 */
	shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
			(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	/* Wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			& OMAP4430_FREQ_UPDATE_MASK) == 0),
			MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Re-enable DDR self-refresh */
	omap_emif_frequency_post_notify();

	/* Configure the MEMIF domain back to HW_WKUP */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
		       __func__);
		return -1;
	}

	/* Record the new rate on success */
	clk->rate = validrate;

	return 0;
}
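
/*
 * Usage sketch (illustrative, not part of the original sources): callers
 * normally reach omap4_core_dpll_m2_set_rate() through the clock
 * framework rather than calling it directly. The clock name below is an
 * assumption for illustration only:
 *
 *	struct clk *m2 = clk_get(NULL, "dpll_core_m2_ck");
 *	if (!IS_ERR(m2)) {
 *		if (clk_set_rate(m2, clk_round_rate(m2, 800000000)))
 *			pr_err("CORE DPLL M2 rate change failed\n");
 *		clk_put(m2);
 *	}
 */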
/**
 * omap4_prcm_freq_update - set the FREQ_UPDATE bit
 *
 * Programs the CM shadow registers to update the EMIF parameters.
 * In some use cases only a few registers need to be updated via the
 * PRCM frequency update sequence: the EMIF read-idle control and
 * zq-config need to be updated on temperature alerts and voltage
 * changes.
 * Returns -EINVAL/-1 on error and 0 on success.
 */
int omap4_prcm_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;
	unsigned long flags;

	if (!l3_emif_clkdm) {
		pr_err("%s: clockdomain lookup failed\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&l3_emif_lock, flags);

	/* Configure the MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);
	/* Disable DDR self-refresh (Errata ID: i728) */
	omap_emif_frequency_pre_notify();

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *   after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start the HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	/* Wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			& OMAP4430_FREQ_UPDATE_MASK) == 0),
			MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Re-enable DDR self-refresh */
	omap_emif_frequency_post_notify();

	/* Configure the MEMIF domain back to HW_WKUP */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update failed (call from %pF)\n",
		       __func__, (void *)_RET_IP_);
		pr_err("CLKCTRL: EMIF_1=0x%x EMIF_2=0x%x DMM=0x%x\n",
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_DMM_CLKCTRL));
		return -1;
	}

	return 0;
}
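
/*
 * Usage sketch (illustrative, not from the original sources): the EMIF
 * driver is expected to call omap4_prcm_freq_update() after it has
 * reprogrammed its shadow registers, e.g. on a temperature alert or a
 * voltage change. setup_temperature_regs() is a hypothetical helper,
 * named only for illustration:
 *
 *	setup_temperature_regs();
 *	if (omap4_prcm_freq_update())
 *		pr_err("EMIF shadow register update failed\n");
 */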
/* Use a very high retry count - we should not hit this condition */
#define MAX_DPLL_WAIT_TRIES	1000000

#define OMAP_1_5GHz	1500000000
#define OMAP_1_2GHz	1200000000
#define OMAP_1GHz	1000000000
#define OMAP_920MHz	920000000
#define OMAP_748MHz	748000000
/* Supported only on OMAP4 */
int omap4_dpllmx_gatectrl_read(struct clk *clk)
{
	u32 v;
	u32 mask;

	if (!clk || !clk->clksel_reg || !cpu_is_omap44xx())
		return -EINVAL;

	mask = clk->flags & CLOCK_CLKOUTX2 ?
			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;

	v = __raw_readl(clk->clksel_reg);
	v &= mask;
	v >>= __ffs(mask);

	return v;
}
void omap4_dpllmx_allow_gatectrl(struct clk *clk)
{
	u32 v;
	u32 mask;

	if (!clk || !clk->clksel_reg || !cpu_is_omap44xx())
		return;

	mask = clk->flags & CLOCK_CLKOUTX2 ?
			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;

	v = __raw_readl(clk->clksel_reg);
	/* Clear the bit to allow gatectrl */
	v &= ~mask;
	__raw_writel(v, clk->clksel_reg);
}
void omap4_dpllmx_deny_gatectrl(struct clk *clk)
{
	u32 v;
	u32 mask;

	if (!clk || !clk->clksel_reg || !cpu_is_omap44xx())
		return;

	mask = clk->flags & CLOCK_CLKOUTX2 ?
			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;

	v = __raw_readl(clk->clksel_reg);
	/* Set the bit to deny gatectrl */
	v |= mask;
	__raw_writel(v, clk->clksel_reg);
}
const struct clkops clkops_omap4_dpllmx_ops = {
	.allow_idle	= omap4_dpllmx_allow_gatectrl,
	.deny_idle	= omap4_dpllmx_deny_gatectrl,
};
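
/*
 * Usage sketch (assumption, not taken from this file): a DPLL output
 * clock in the OMAP4 clock data would attach these ops so the clock
 * framework can toggle output autogating via allow_idle/deny_idle:
 *
 *	static struct clk dpll_mpu_m2_ck = {
 *		.name		= "dpll_mpu_m2_ck",
 *		.ops		= &clkops_omap4_dpllmx_ops,
 *		.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_MPU,
 *	};
 *
 * X2 outputs additionally set CLOCK_CLKOUTX2 in .flags so that the
 * CLKOUTX2 gate-control bit is used instead.
 */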
static void omap4460_mpu_dpll_update_children(unsigned long rate)
{
	u32 v;

	/*
	 * The interconnect frequency to EMIF should be switched
	 * between MPU clk divided by 4 (for frequencies higher than
	 * 920 MHz) and MPU clk divided by 2 (for frequencies lower
	 * than or equal to 920 MHz). Similarly, the async bridge to
	 * ABE must be MPU clk divided by 8 for MPU clk > 748 MHz and
	 * MPU clk divided by 4 for lower frequencies.
	 */
	v = __raw_readl(OMAP4430_CM_MPU_MPU_CLKCTRL);
	if (rate > OMAP_920MHz)
		v |= OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK;
	else
		v &= ~OMAP4460_CLKSEL_EMIF_DIV_MODE_MASK;

	if (rate > OMAP_748MHz)
		v |= OMAP4460_CLKSEL_ABE_DIV_MODE_MASK;
	else
		v &= ~OMAP4460_CLKSEL_ABE_DIV_MODE_MASK;
	__raw_writel(v, OMAP4430_CM_MPU_MPU_CLKCTRL);
}
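
/*
 * Resulting divider selection (summary of the logic above):
 *
 *	MPU rate		EMIF interconnect	ABE async bridge
 *	<= 748 MHz		MPU/2			MPU/4
 *	> 748, <= 920 MHz	MPU/2			MPU/8
 *	> 920 MHz		MPU/4			MPU/8
 */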
int omap4460_mpu_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	struct dpll_data *dd;
	u32 v;
	unsigned long dpll_rate;

	if (!clk || !rate || !clk->parent)
		return -EINVAL;

	dd = clk->parent->dpll_data;
	if (!dd)
		return -EINVAL;

	if (!clk->parent->set_rate)
		return -EINVAL;

	if (rate > clk->rate)
		omap4460_mpu_dpll_update_children(rate);

	/*
	 * On OMAP4460, to obtain an MPU DPLL frequency higher
	 * than 1 GHz, DCC (Duty Cycle Correction) needs to be
	 * enabled; it must be kept disabled below 1 GHz.
	 */
	dpll_rate = omap2_get_dpll_rate(clk->parent);
	if (rate <= OMAP_1_5GHz) {
		/* If DCC is enabled, disable it */
		v = __raw_readl(dd->mult_div1_reg);
		if (v & OMAP4460_DCC_EN_MASK) {
			v &= ~OMAP4460_DCC_EN_MASK;
			__raw_writel(v, dd->mult_div1_reg);
		}

		if (rate != dpll_rate)
			clk->parent->set_rate(clk->parent, rate);
	} else {
		/*
		 * On 4460, the MPU clk for frequencies higher than 1 GHz
		 * is sourced from CLKOUTX2_M3 instead of CLKOUT_M2, while
		 * the value of M3 is fixed to 1. Hence, for frequencies
		 * higher than 1 GHz, lock the DPLL at half the rate so
		 * that CLKOUTX2_M3 matches the requested rate.
		 */
		if (rate != dpll_rate * 2)
			clk->parent->set_rate(clk->parent, rate / 2);

		v = __raw_readl(dd->mult_div1_reg);
		v &= ~OMAP4460_DCC_COUNT_MAX_MASK;
		v |= (5 << OMAP4460_DCC_COUNT_MAX_SHIFT);
		__raw_writel(v, dd->mult_div1_reg);

		v |= OMAP4460_DCC_EN_MASK;
		__raw_writel(v, dd->mult_div1_reg);
	}

	if (rate < clk->rate)
		omap4460_mpu_dpll_update_children(rate);

	clk->rate = rate;

	return 0;
}
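
/*
 * Worked example (illustrative): with the OMAP_1_5GHz threshold used
 * above, a request for 1.2 GHz locks the DPLL at 1.2 GHz directly with
 * DCC disabled. For a hypothetical request above 1.5 GHz, the DPLL is
 * instead locked at half the requested rate and DCC is enabled, so that
 * CLKOUTX2_M3 (twice the DPLL rate, with M3 fixed at 1) delivers the
 * full requested frequency.
 */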
long omap4460_mpu_dpll_round_rate(struct clk *clk, unsigned long rate)
{
	if (!clk || !rate || !clk->parent)
		return -EINVAL;

	if (clk->parent->round_rate)
		return clk->parent->round_rate(clk->parent, rate);
	else
		return 0;
}
unsigned long omap4460_mpu_dpll_recalc(struct clk *clk)
{
	struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->parent)
		return -EINVAL;

	dd = clk->parent->dpll_data;
	if (!dd)
		return -EINVAL;

	v = __raw_readl(dd->mult_div1_reg);
	if (v & OMAP4460_DCC_EN_MASK)
		return omap2_get_dpll_rate(clk->parent) * 2;

	return omap2_get_dpll_rate(clk->parent);
}
unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk)
{
	u32 v;
	unsigned long rate;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return 0;

	dd = clk->dpll_data;

	rate = omap2_get_dpll_rate(clk);

	/* regm4xen adds a multiplier of 4 to DPLL calculations */
	v = __raw_readl(dd->control_reg);
	if (v & OMAP4430_DPLL_REGM4XEN_MASK)
		rate *= OMAP4430_REGM4XEN_MULT;

	return rate;
}
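
/*
 * Worked example (illustrative): if the M/N programming read by
 * omap2_get_dpll_rate() evaluates to 100 MHz and the DPLL_REGM4XEN bit
 * is set, the rate reported here is 4 x 100 MHz = 400 MHz, since the
 * hardware inserts a fixed x4 multiplier (OMAP4430_REGM4XEN_MULT) into
 * the DPLL output path. omap4_dpll_regm4xen_round_rate() below applies
 * the inverse scaling when rounding a target rate.
 */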
long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 v;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	/* regm4xen adds a multiplier of 4 to DPLL calculations */
	v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;

	if (v)
		target_rate = target_rate / OMAP4430_REGM4XEN_MULT;

	omap2_dpll_round_rate(clk, target_rate);

	if (v)
		clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;

	return clk->dpll_data->last_rounded_rate;
}
struct dpll_reg_tuple {
	u16 addr;
	u32 val;
};

struct omap4_dpll_regs {
	char *name;
	u32 mod_partition;
	u32 mod_inst;
	struct dpll_reg_tuple clkmode;
	struct dpll_reg_tuple autoidle;
	struct dpll_reg_tuple idlest;
	struct dpll_reg_tuple clksel;
	struct dpll_reg_tuple div_m2;
	struct dpll_reg_tuple div_m3;
	struct dpll_reg_tuple div_m4;
	struct dpll_reg_tuple div_m5;
	struct dpll_reg_tuple div_m6;
	struct dpll_reg_tuple div_m7;
	struct dpll_reg_tuple clkdcoldo;
};
static struct omap4_dpll_regs dpll_regs[] = {
	/* MPU DPLL */
	{
		.name		= "mpu",
		.mod_partition	= OMAP4430_CM1_PARTITION,
		.mod_inst	= OMAP4430_CM1_CKGEN_INST,
		.clkmode	= {.addr = OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET},
		.autoidle	= {.addr = OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET},
		.idlest		= {.addr = OMAP4_CM_IDLEST_DPLL_MPU_OFFSET},
		.clksel		= {.addr = OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET},
		.div_m2		= {.addr = OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET},
	},
	/* IVA DPLL */
	{
		.name		= "iva",
		.mod_partition	= OMAP4430_CM1_PARTITION,
		.mod_inst	= OMAP4430_CM1_CKGEN_INST,
		.clkmode	= {.addr = OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET},
		.autoidle	= {.addr = OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET},
		.idlest		= {.addr = OMAP4_CM_IDLEST_DPLL_IVA_OFFSET},
		.clksel		= {.addr = OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET},
		.div_m4		= {.addr = OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET},
		.div_m5		= {.addr = OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET},
	},
	/* ABE DPLL */
	{
		.name		= "abe",
		.mod_partition	= OMAP4430_CM1_PARTITION,
		.mod_inst	= OMAP4430_CM1_CKGEN_INST,
		.clkmode	= {.addr = OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET},
		.autoidle	= {.addr = OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET},
		.idlest		= {.addr = OMAP4_CM_IDLEST_DPLL_ABE_OFFSET},
		.clksel		= {.addr = OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET},
		.div_m2		= {.addr = OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET},
		.div_m3		= {.addr = OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET},
	},
	/* USB DPLL */
	{
		.name		= "usb",
		.mod_partition	= OMAP4430_CM2_PARTITION,
		.mod_inst	= OMAP4430_CM2_CKGEN_INST,
		.clkmode	= {.addr = OMAP4_CM_CLKMODE_DPLL_USB_OFFSET},
		.autoidle	= {.addr = OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET},
		.idlest		= {.addr = OMAP4_CM_IDLEST_DPLL_USB_OFFSET},
		.clksel		= {.addr = OMAP4_CM_CLKSEL_DPLL_USB_OFFSET},
		.div_m2		= {.addr = OMAP4_CM_DIV_M2_DPLL_USB_OFFSET},
		.clkdcoldo	= {.addr = OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET},
	},
	/* PER DPLL */
	{
		.name		= "per",
		.mod_partition	= OMAP4430_CM2_PARTITION,
		.mod_inst	= OMAP4430_CM2_CKGEN_INST,
		.clkmode	= {.addr = OMAP4_CM_CLKMODE_DPLL_PER_OFFSET},
		.autoidle	= {.addr = OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET},
		.idlest		= {.addr = OMAP4_CM_IDLEST_DPLL_PER_OFFSET},
		.clksel		= {.addr = OMAP4_CM_CLKSEL_DPLL_PER_OFFSET},
		.div_m2		= {.addr = OMAP4_CM_DIV_M2_DPLL_PER_OFFSET},
		.div_m3		= {.addr = OMAP4_CM_DIV_M3_DPLL_PER_OFFSET},
		.div_m4		= {.addr = OMAP4_CM_DIV_M4_DPLL_PER_OFFSET},
		.div_m5		= {.addr = OMAP4_CM_DIV_M5_DPLL_PER_OFFSET},
		.div_m6		= {.addr = OMAP4_CM_DIV_M6_DPLL_PER_OFFSET},
		.div_m7		= {.addr = OMAP4_CM_DIV_M7_DPLL_PER_OFFSET},
	},
};
static inline void omap4_dpll_store_reg(struct omap4_dpll_regs *dpll_reg,
					struct dpll_reg_tuple *tuple)
{
	if (tuple->addr)
		tuple->val =
		    omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
					       dpll_reg->mod_inst,
					       tuple->addr);
}
void omap4_dpll_prepare_off(void)
{
	u32 i;
	struct omap4_dpll_regs *dpll_reg = dpll_regs;

	for (i = 0; i < ARRAY_SIZE(dpll_regs); i++, dpll_reg++) {
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->clkmode);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->autoidle);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->clksel);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m2);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m3);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m4);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m5);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m6);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->div_m7);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->clkdcoldo);
		omap4_dpll_store_reg(dpll_reg, &dpll_reg->idlest);
	}
}
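
/*
 * Usage sketch (assumption, not taken from this file): the device-off
 * idle path is expected to bracket the power transition with this pair
 * of helpers, since the CM1/CM2 register context is lost:
 *
 *	omap4_dpll_prepare_off();
 *	... enter the low-power state ...
 *	omap4_dpll_resume_off();
 */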
static void omap4_dpll_print_reg(struct omap4_dpll_regs *dpll_reg, char *name,
				 struct dpll_reg_tuple *tuple)
{
	if (tuple->addr)
		pr_warn("%s - Address offset = 0x%08x, value=0x%08x\n",
			name, tuple->addr, tuple->val);
}
static void omap4_dpll_dump_regs(struct omap4_dpll_regs *dpll_reg)
{
	pr_warn("%s: Unable to lock dpll %s[part=%x inst=%x]:\n",
		__func__, dpll_reg->name, dpll_reg->mod_partition,
		dpll_reg->mod_inst);
	omap4_dpll_print_reg(dpll_reg, "clksel", &dpll_reg->clksel);
	omap4_dpll_print_reg(dpll_reg, "div_m2", &dpll_reg->div_m2);
	omap4_dpll_print_reg(dpll_reg, "div_m3", &dpll_reg->div_m3);
	omap4_dpll_print_reg(dpll_reg, "div_m4", &dpll_reg->div_m4);
	omap4_dpll_print_reg(dpll_reg, "div_m5", &dpll_reg->div_m5);
	omap4_dpll_print_reg(dpll_reg, "div_m6", &dpll_reg->div_m6);
	omap4_dpll_print_reg(dpll_reg, "div_m7", &dpll_reg->div_m7);
	omap4_dpll_print_reg(dpll_reg, "clkdcoldo", &dpll_reg->clkdcoldo);
	omap4_dpll_print_reg(dpll_reg, "clkmode", &dpll_reg->clkmode);
	omap4_dpll_print_reg(dpll_reg, "autoidle", &dpll_reg->autoidle);
	if (dpll_reg->idlest.addr)
		pr_warn("idlest - Address offset = 0x%08x, before val=0x%08x after = 0x%08x\n",
			dpll_reg->idlest.addr, dpll_reg->idlest.val,
			omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
						   dpll_reg->mod_inst,
						   dpll_reg->idlest.addr));
}
static void omap4_wait_dpll_lock(struct omap4_dpll_regs *dpll_reg)
{
	u32 j = 0;

	/* Return if we don't need to lock */
	if ((dpll_reg->clkmode.val & OMAP4430_DPLL_EN_MASK) !=
	    (DPLL_LOCKED << OMAP4430_DPLL_EN_SHIFT))
		return;

	while ((omap4_cminst_read_inst_reg(dpll_reg->mod_partition,
					   dpll_reg->mod_inst,
					   dpll_reg->idlest.addr)
		& OMAP4430_ST_DPLL_CLK_MASK) !=
	       (0x1 << OMAP4430_ST_DPLL_CLK_SHIFT)
	       && j < MAX_DPLL_WAIT_TRIES) {
		j++;
		udelay(1);
	}

	/* If we are unable to lock, warn and move on */
	if (j == MAX_DPLL_WAIT_TRIES)
		omap4_dpll_dump_regs(dpll_reg);
}
static inline void omap4_dpll_restore_reg(struct omap4_dpll_regs *dpll_reg,
					  struct dpll_reg_tuple *tuple)
{
	if (tuple->addr)
		omap4_cminst_write_inst_reg(tuple->val,
					    dpll_reg->mod_partition,
					    dpll_reg->mod_inst, tuple->addr);
}
void omap4_dpll_resume_off(void)
{
	u32 i;
	struct omap4_dpll_regs *dpll_reg = dpll_regs;

	for (i = 0; i < ARRAY_SIZE(dpll_regs); i++, dpll_reg++) {
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clksel);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m2);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m3);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m4);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m5);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m6);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->div_m7);
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clkdcoldo);

		/* Restore clkmode after the above registers are restored */
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->clkmode);

		omap4_wait_dpll_lock(dpll_reg);

		/* Restore autoidle settings after the dpll is locked */
		omap4_dpll_restore_reg(dpll_reg, &dpll_reg->autoidle);
	}
}