OMAP3: PM: VDD2 dvfs at higher VDD1 opp
[linux-ginger.git] / arch/arm/mach-omap2/resource34xx.c
/*
 * linux/arch/arm/mach-omap2/resource34xx.c
 * OMAP3 resource init/change_level/validate_level functions
 *
 * Copyright (C) 2007-2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * History:
 */
#include <linux/pm_qos_params.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
#include <plat/omap34xx.h>

#include "smartreflex.h"
#include "resource34xx.h"
#include "pm.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
/**
 * init_latency - Initializes the mpu/core latency resource.
 * @resp: Latency resource to be initialized
 *
 * No return value.
 */
void init_latency(struct shared_resource *resp)
{
        resp->no_of_users = 0;
        resp->curr_level = RES_DEFAULTLEVEL;
        *((u8 *)resp->resource_data) = 0;
        return;
}
/**
 * set_latency - Adds, updates, or removes the CPU_DMA_LATENCY
 * requirement in pm_qos_params.
 * @resp: resource pointer
 * @latency: target latency to be set
 *
 * Returns 0 on success, or error values as returned by
 * pm_qos_update_requirement/pm_qos_add_requirement.
 */
int set_latency(struct shared_resource *resp, u32 latency)
{
        u8 *pm_qos_req_added;

        if (resp->curr_level == latency)
                return 0;
        else
                /* Update the resource's current level */
                resp->curr_level = latency;

        pm_qos_req_added = resp->resource_data;
        if (latency == RES_DEFAULTLEVEL)
                /* No more users left, remove the pm_qos_req if present */
                if (*pm_qos_req_added) {
                        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
                                                  resp->name);
                        *pm_qos_req_added = 0;
                        return 0;
                }

        if (*pm_qos_req_added) {
                return pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                                 resp->name, latency);
        } else {
                *pm_qos_req_added = 1;
                return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
                                              resp->name, latency);
        }
}
/**
 * init_pd_latency - Initializes the power domain latency resource.
 * @resp: Power Domain Latency resource to be initialized.
 *
 * No return value.
 */
void init_pd_latency(struct shared_resource *resp)
{
        struct pd_latency_db *pd_lat_db;

        resp->no_of_users = 0;
        if (enable_off_mode)
                resp->curr_level = PD_LATENCY_OFF;
        else
                resp->curr_level = PD_LATENCY_RET;
        pd_lat_db = resp->resource_data;
        /* Populate the power domain associated with the latency resource */
        pd_lat_db->pd = pwrdm_lookup(pd_lat_db->pwrdm_name);
        set_pwrdm_state(pd_lat_db->pd, resp->curr_level);
        return;
}
/**
 * set_pd_latency - Updates the curr_level of the power domain resource.
 * @resp: Power domain latency resource.
 * @latency: New latency value acceptable.
 *
 * This function maps the latency in microseconds to the acceptable
 * power domain state using the latency DB.
 * It then programs the power domain to enter the target state.
 * Always returns 0.
 */
int set_pd_latency(struct shared_resource *resp, u32 latency)
{
        u32 pd_lat_level, ind;
        struct pd_latency_db *pd_lat_db;
        struct powerdomain *pwrdm;

        pd_lat_db = resp->resource_data;
        pwrdm = pd_lat_db->pd;
        pd_lat_level = PD_LATENCY_OFF;
        /* Use the latency DB to map to the appropriate PD state */
        for (ind = 0; ind < PD_LATENCY_MAXLEVEL; ind++) {
                if (pd_lat_db->latency[ind] < latency) {
                        pd_lat_level = ind;
                        break;
                }
        }

        if (!enable_off_mode && pd_lat_level == PD_LATENCY_OFF)
                pd_lat_level = PD_LATENCY_RET;

        resp->curr_level = pd_lat_level;
        set_pwrdm_state(pwrdm, pd_lat_level);
        return 0;
}
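/*
 * Worked example (hypothetical DB values): with latency[] ordered from the
 * deepest state, e.g. { [PD_LATENCY_OFF] = 1000, [PD_LATENCY_RET] = 100 }
 * in microseconds, a request of 300us skips the OFF entry (1000 < 300 is
 * false) and stops at RET (100 < 300), so the domain may enter retention
 * but not off.
 */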
static struct shared_resource *vdd1_resp;
static struct shared_resource *vdd2_resp;
static struct device dummy_mpu_dev;
static struct device dummy_dsp_dev;
static struct device vdd2_dev;
static int vdd1_lock;
static int vdd2_lock;
static struct clk *dpll1_clk, *dpll2_clk, *dpll3_clk;
static int curr_vdd1_opp;
static int curr_vdd2_opp;
static DEFINE_MUTEX(dvfs_mutex);
/*
 * get_opp - maps a frequency to an OPP id. Callers pass a pointer to the
 * highest entry of an OPP table (e.g. mpu_opps + MAX_VDD1_OPP); the table
 * is walked downwards to find the lowest OPP whose rate still satisfies
 * freq, clamped to the highest and lowest entries.
 */
static unsigned short get_opp(struct omap_opp *opp_freq_table,
                unsigned long freq)
{
        struct omap_opp *prcm_config;

        prcm_config = opp_freq_table;

        if (prcm_config->rate <= freq)
                return prcm_config->opp_id; /* Return the highest OPP */
        for (; prcm_config->rate; prcm_config--)
                if (prcm_config->rate < freq)
                        return (prcm_config+1)->opp_id;
                else if (prcm_config->rate == freq)
                        return prcm_config->opp_id;
        /* Return the lowest OPP */
        return (prcm_config+1)->opp_id;
}
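/*
 * Worked example (hypothetical table): for rates of 125/250/500/550/600 MHz
 * at opp_ids 1..5 with a zero-rate sentinel below opp 1, get_opp(table + 5,
 * 300 MHz) walks down from 600 MHz until 250 < 300, then returns the entry
 * above it: opp_id 3 (500 MHz), the lowest OPP that still satisfies the
 * requested 300 MHz.
 */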
/**
 * init_opp - Initialize the OPP resource
 */
void init_opp(struct shared_resource *resp)
{
        struct clk *l3_clk;

        resp->no_of_users = 0;

        if (!mpu_opps || !dsp_opps || !l3_opps)
                return;

        /* Initialize the current level of the OPP resource
         * to the opp set by u-boot.
         */
        if (strcmp(resp->name, "vdd1_opp") == 0) {
                vdd1_resp = resp;
                dpll1_clk = clk_get(NULL, "dpll1_ck");
                dpll2_clk = clk_get(NULL, "dpll2_ck");
                resp->curr_level = get_opp(mpu_opps + MAX_VDD1_OPP,
                                dpll1_clk->rate);
                curr_vdd1_opp = resp->curr_level;
        } else if (strcmp(resp->name, "vdd2_opp") == 0) {
                vdd2_resp = resp;
                dpll3_clk = clk_get(NULL, "dpll3_m2_ck");
                l3_clk = clk_get(NULL, "l3_ick");
                resp->curr_level = get_opp(l3_opps + MAX_VDD2_OPP,
                                l3_clk->rate);
                curr_vdd2_opp = resp->curr_level;
        }
        return;
}
int resource_access_opp_lock(int res, int delta)
{
        if (res == VDD1_OPP) {
                vdd1_lock += delta;
                return vdd1_lock;
        } else if (res == VDD2_OPP) {
                vdd2_lock += delta;
                return vdd2_lock;
        }
        return -EINVAL;
}
#ifndef CONFIG_CPU_FREQ
static unsigned long compute_lpj(unsigned long ref, u_int div, u_int mult)
{
        unsigned long new_jiffy_l, new_jiffy_h;

        /*
         * Recalculate loops_per_jiffy.  We do it this way to
         * avoid math overflow on 32-bit machines.  Maybe we
         * should make this architecture dependent?  If you have
         * a better way of doing this, please replace!
         *
         *    new = old * mult / div
         */
        new_jiffy_h = ref / div;
        new_jiffy_l = (ref % div) / 100;
        new_jiffy_h *= mult;
        new_jiffy_l = new_jiffy_l * mult / div;

        return new_jiffy_h + new_jiffy_l * 100;
}
#endif
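/*
 * Worked example (illustrative numbers): scaling the MPU from 250 MHz to
 * 500 MHz, div and mult arrive in kHz, so compute_lpj(1248000, 250000,
 * 500000) evaluates as:
 *
 *      new_jiffy_h = 1248000 / 250000         = 4
 *      new_jiffy_l = (1248000 % 250000) / 100 = 2480
 *      new_jiffy_h * mult                     = 2000000
 *      new_jiffy_l * mult / div               = 4960
 *      result = 2000000 + 4960 * 100          = 2496000  (== 1248000 * 2)
 *
 * The /100 split keeps the intermediate products inside 32 bits (a direct
 * ref * mult would overflow), at the cost of truncating the low two decimal
 * digits of the remainder.
 */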
static int program_opp_freq(int res, int target_level, int current_level)
{
        int ret = 0, l3_div;
        int *curr_opp;

        if (res == VDD1_OPP) {
                curr_opp = &curr_vdd1_opp;
                clk_set_rate(dpll1_clk, mpu_opps[target_level].rate);
                clk_set_rate(dpll2_clk, dsp_opps[target_level].rate);
#ifndef CONFIG_CPU_FREQ
                /* Update loops_per_jiffy if processor speed is being changed */
                loops_per_jiffy = compute_lpj(loops_per_jiffy,
                        mpu_opps[current_level].rate/1000,
                        mpu_opps[target_level].rate/1000);
#endif
        } else {
                curr_opp = &curr_vdd2_opp;
                l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) &
                        OMAP3430_CLKSEL_L3_MASK;
                ret = clk_set_rate(dpll3_clk,
                                l3_opps[target_level].rate * l3_div);
        }
        if (ret)
                return current_level;
#ifdef CONFIG_PM
        omap3_save_scratchpad_contents();
#endif
        *curr_opp = target_level;
        return target_level;
}
static int program_opp(int res, struct omap_opp *opp, int target_level,
                int current_level)
{
        int i, ret = 0, raise;
#ifdef CONFIG_OMAP_SMARTREFLEX
        unsigned long t_opp;

        t_opp = ID_VDD(res) | ID_OPP_NO(opp[target_level].opp_id);
#endif
        if (target_level > current_level)
                raise = 1;
        else
                raise = 0;

        for (i = 0; i < 2; i++) {
                if (i == raise)
                        ret = program_opp_freq(res, target_level,
                                        current_level);
#ifdef CONFIG_OMAP_SMARTREFLEX
                else
                        sr_voltagescale_vcbypass(t_opp,
                                        opp[target_level].vsel);
#endif
        }

        return ret;
}
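/*
 * Note on the two-pass loop above: with raise == 1 the voltage is scaled on
 * pass 0 and the frequency on pass 1, i.e. the rail rises before the clocks
 * speed up; with raise == 0 the frequency drops first and the voltage
 * follows. Either way the rail never runs below what the currently
 * programmed frequency requires.
 */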
int resource_set_opp_level(int res, u32 target_level, int flags)
{
        unsigned long mpu_freq, mpu_old_freq;
#ifdef CONFIG_CPU_FREQ
        struct cpufreq_freqs freqs_notify;
#endif
        struct shared_resource *resp;

        if (res == VDD1_OPP)
                resp = vdd1_resp;
        else if (res == VDD2_OPP)
                resp = vdd2_resp;
        else
                return 0;

        if (resp->curr_level == target_level)
                return 0;

        if (!mpu_opps || !dsp_opps || !l3_opps)
                return 0;

        mutex_lock(&dvfs_mutex);

        if (res == VDD1_OPP) {
                if (!(flags & OPP_IGNORE_LOCK) && vdd1_lock) {
                        mutex_unlock(&dvfs_mutex);
                        return 0;
                }
                mpu_old_freq = mpu_opps[resp->curr_level].rate;
                mpu_freq = mpu_opps[target_level].rate;

#ifdef CONFIG_CPU_FREQ
                freqs_notify.old = mpu_old_freq/1000;
                freqs_notify.new = mpu_freq/1000;
                freqs_notify.cpu = 0;
                /* Send a pre notification to CPUFreq */
                cpufreq_notify_transition(&freqs_notify, CPUFREQ_PRECHANGE);
#endif
                resp->curr_level = program_opp(res, mpu_opps, target_level,
                        resp->curr_level);
#ifdef CONFIG_CPU_FREQ
                /* Send a post notification to CPUFreq */
                cpufreq_notify_transition(&freqs_notify, CPUFREQ_POSTCHANGE);
#endif
        } else {
                if (!(flags & OPP_IGNORE_LOCK) && vdd2_lock) {
                        mutex_unlock(&dvfs_mutex);
                        return 0;
                }
                resp->curr_level = program_opp(res, l3_opps, target_level,
                        resp->curr_level);
        }
        mutex_unlock(&dvfs_mutex);
        return 0;
}
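/*
 * Usage sketch (illustrative): move the VDD1 rail straight to OPP3,
 * honouring any DVFS lock currently held:
 *
 *      resource_set_opp_level(VDD1_OPP, 3, 0);
 *
 * Passing OPP_IGNORE_LOCK instead of 0 forces the change even while
 * vdd1_lock is non-zero.
 */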
int set_opp(struct shared_resource *resp, u32 target_level)
{
        unsigned long tput;
        unsigned long req_l3_freq;
        int ind;

        if (resp == vdd1_resp) {
                if (target_level < 3)
                        resource_release("vdd2_opp", &vdd2_dev);

                resource_set_opp_level(VDD1_OPP, target_level, 0);
                /*
                 * For VDD1 OPP3 and above, make sure the interconnect
                 * is at 100 MHz or above.
                 * Throughput in KiB/s for 100 MHz = 100 * 1000 * 4.
                 */
                if (target_level >= 3)
                        resource_request("vdd2_opp", &vdd2_dev, 400000);

        } else if (resp == vdd2_resp) {
                tput = target_level;

                /* Convert the tput in KiB/s to bus frequency in Hz */
                req_l3_freq = (tput * 1000)/4;

                for (ind = 2; ind <= MAX_VDD2_OPP; ind++)
                        if ((l3_opps + ind)->rate >= req_l3_freq) {
                                target_level = ind;
                                break;
                        }

                /* Set the highest OPP possible */
                if (ind > MAX_VDD2_OPP)
                        target_level = ind - 1;
                resource_set_opp_level(VDD2_OPP, target_level, 0);
        }
        return 0;
}
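/*
 * Worked example: a bandwidth request of 400000 KiB/s on "vdd2_opp" gives
 * req_l3_freq = 400000 * 1000 / 4 = 100000000 Hz, i.e. the 4-byte-wide L3
 * must run at 100 MHz or more, so the loop above picks the lowest VDD2 OPP
 * whose L3 rate is >= 100 MHz.
 */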
/**
 * validate_opp - Validates if valid VDD1 OPPs are passed as the
 * target_level.
 * VDD2 OPP levels are passed as L3 throughput, which are then mapped
 * to an appropriate OPP.
 */
int validate_opp(struct shared_resource *resp, u32 target_level)
{
        return 0;
}
/**
 * init_freq - Initialize the frequency resource.
 */
void init_freq(struct shared_resource *resp)
{
        char *linked_res_name;

        resp->no_of_users = 0;

        if (!mpu_opps || !dsp_opps)
                return;

        linked_res_name = (char *)resp->resource_data;
        /* Initialize the current level of the Freq resource
         * to the frequency set by u-boot.
         */
        if (strcmp(resp->name, "mpu_freq") == 0)
                /* MPU freq in Hz */
                resp->curr_level = mpu_opps[curr_vdd1_opp].rate;
        else if (strcmp(resp->name, "dsp_freq") == 0)
                /* DSP freq in Hz */
                resp->curr_level = dsp_opps[curr_vdd1_opp].rate;
        return;
}
int set_freq(struct shared_resource *resp, u32 target_level)
{
        unsigned int vdd1_opp;

        if (!mpu_opps || !dsp_opps)
                return 0;

        if (strcmp(resp->name, "mpu_freq") == 0) {
                vdd1_opp = get_opp(mpu_opps + MAX_VDD1_OPP, target_level);
                resource_request("vdd1_opp", &dummy_mpu_dev, vdd1_opp);
        } else if (strcmp(resp->name, "dsp_freq") == 0) {
                vdd1_opp = get_opp(dsp_opps + MAX_VDD1_OPP, target_level);
                resource_request("vdd1_opp", &dummy_dsp_dev, vdd1_opp);
        }
        resp->curr_level = target_level;
        return 0;
}
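/*
 * Usage sketch (illustrative rate): a user that needs the DSP at a given
 * rate requests it in Hz; set_freq() maps the rate to the matching VDD1 OPP
 * via get_opp() and forwards it as a "vdd1_opp" request on behalf of
 * dummy_dsp_dev:
 *
 *      resource_request("dsp_freq", dev, 430000000);
 */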
int validate_freq(struct shared_resource *resp, u32 target_level)
{
        return 0;
}