First Support on Ginger and OMAP TI
[linux-ginger.git] / arch / arm / mach-omap2 / resource34xx.c
blob04be4d26945beccf81cad08a8cdfc6086d6ce70a
1 /*
2 * linux/arch/arm/mach-omap2/resource34xx.c
3 * OMAP3 resource init/change_level/validate_level functions
5 * Copyright (C) 2007-2008 Texas Instruments, Inc.
6 * Rajendra Nayak <rnayak@ti.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 * History:
19 #include <linux/pm_qos_params.h>
20 #include <linux/cpufreq.h>
21 #include <linux/delay.h>
23 #include <plat/powerdomain.h>
24 #include <plat/clockdomain.h>
25 #include <plat/omap34xx.h>
27 #include "smartreflex.h"
28 #include "resource34xx.h"
29 #include "pm.h"
30 #include "cm.h"
31 #include "cm-regbits-34xx.h"
33 #ifndef CONFIG_CPU_IDLE
34 #warning MPU latency constraints require CONFIG_CPU_IDLE to function!
35 #endif
37 /**
38 * init_latency - Initializes the mpu/core latency resource.
39 * @resp: Latency resource to be initalized
41 * No return value.
43 void init_latency(struct shared_resource *resp)
45 resp->no_of_users = 0;
46 resp->curr_level = RES_LATENCY_DEFAULTLEVEL;
47 *((u8 *)resp->resource_data) = 0;
48 return;
51 /**
52 * set_latency - Adds/Updates and removes the CPU_DMA_LATENCY in *pm_qos_params.
53 * @resp: resource pointer
54 * @latency: target latency to be set
56 * Returns 0 on success, or error values as returned by
57 * pm_qos_update_requirement/pm_qos_add_requirement.
59 int set_latency(struct shared_resource *resp, u32 latency)
61 u8 *pm_qos_req_added;
63 if (resp->curr_level == latency)
64 return 0;
65 else
66 /* Update the resources current level */
67 resp->curr_level = latency;
69 pm_qos_req_added = resp->resource_data;
70 if (latency == RES_LATENCY_DEFAULTLEVEL)
71 /* No more users left, remove the pm_qos_req if present */
72 if (*pm_qos_req_added) {
73 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
74 resp->name);
75 *pm_qos_req_added = 0;
76 return 0;
79 if (*pm_qos_req_added) {
80 return pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
81 resp->name, latency);
82 } else {
83 *pm_qos_req_added = 1;
84 return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
85 resp->name, latency);
89 /**
90 * init_pd_latency - Initializes the power domain latency resource.
91 * @resp: Power Domain Latency resource to be initialized.
93 * No return value.
95 void init_pd_latency(struct shared_resource *resp)
97 struct pd_latency_db *pd_lat_db;
99 resp->no_of_users = 0;
100 if (enable_off_mode)
101 resp->curr_level = PD_LATENCY_OFF;
102 else
103 resp->curr_level = PD_LATENCY_RET;
104 pd_lat_db = resp->resource_data;
105 /* Populate the power domain associated with the latency resource */
106 pd_lat_db->pd = pwrdm_lookup(pd_lat_db->pwrdm_name);
107 set_pwrdm_state(pd_lat_db->pd, resp->curr_level);
108 return;
112 * set_pd_latency - Updates the curr_level of the power domain resource.
113 * @resp: Power domain latency resource.
114 * @latency: New latency value acceptable.
116 * This function maps the latency in microsecs to the acceptable
117 * Power domain state using the latency DB.
118 * It then programs the power domain to enter the target state.
119 * Always returns 0.
121 int set_pd_latency(struct shared_resource *resp, u32 latency)
123 u32 pd_lat_level, ind;
124 struct pd_latency_db *pd_lat_db;
125 struct powerdomain *pwrdm;
127 pd_lat_db = resp->resource_data;
128 pwrdm = pd_lat_db->pd;
129 pd_lat_level = PD_LATENCY_OFF;
130 /* using the latency db map to the appropriate PD state */
131 for (ind = 0; ind < PD_LATENCY_MAXLEVEL; ind++) {
132 if (pd_lat_db->latency[ind] < latency) {
133 pd_lat_level = ind;
134 break;
138 if (!enable_off_mode && pd_lat_level == PD_LATENCY_OFF)
139 pd_lat_level = PD_LATENCY_RET;
141 resp->curr_level = pd_lat_level;
142 set_pwrdm_state(pwrdm, pd_lat_level);
143 return 0;
/* OPP shared-resource handles, cached by init_opp() */
static struct shared_resource *vdd1_resp;
static struct shared_resource *vdd2_resp;
/* Dummy devices used to issue internal resource_request() calls */
static struct device dummy_mpu_dev;
static struct device dummy_dsp_dev;
static struct device vdd2_dev;
/* Non-zero while OPP changes on the respective rail are locked out */
static int vdd1_lock;
static int vdd2_lock;
/* DPLL clock handles: dpll1 = MPU, dpll2 = IVA/DSP, dpll3 = CORE/L3 */
static struct clk *dpll1_clk, *dpll2_clk, *dpll3_clk;
/* Cached current OPP ids for the VDD1/VDD2 rails */
static int curr_vdd1_opp;
static int curr_vdd2_opp;
/* Serializes all OPP (DVFS) transitions */
static DEFINE_MUTEX(dvfs_mutex);
158 static unsigned short get_opp(struct omap_opp *opp_freq_table,
159 unsigned long freq)
161 struct omap_opp *prcm_config;
162 prcm_config = opp_freq_table;
164 if (prcm_config->rate <= freq)
165 return prcm_config->opp_id; /* Return the Highest OPP */
166 for (; prcm_config->rate; prcm_config--)
167 if (prcm_config->rate < freq)
168 return (prcm_config+1)->opp_id;
169 else if (prcm_config->rate == freq)
170 return prcm_config->opp_id;
171 /* Return the least OPP */
172 return (prcm_config+1)->opp_id;
176 * init_opp - Initialize the OPP resource
178 void init_opp(struct shared_resource *resp)
180 struct clk *l3_clk;
181 resp->no_of_users = 0;
183 if (!mpu_opps || !dsp_opps || !l3_opps)
184 return;
186 /* Initialize the current level of the OPP resource
187 * to the opp set by u-boot.
189 if (strcmp(resp->name, "vdd1_opp") == 0) {
190 vdd1_resp = resp;
191 dpll1_clk = clk_get(NULL, "dpll1_ck");
192 dpll2_clk = clk_get(NULL, "dpll2_ck");
193 resp->curr_level = get_opp(mpu_opps + MAX_VDD1_OPP,
194 dpll1_clk->rate);
195 curr_vdd1_opp = resp->curr_level;
196 } else if (strcmp(resp->name, "vdd2_opp") == 0) {
197 vdd2_resp = resp;
198 dpll3_clk = clk_get(NULL, "dpll3_m2_ck");
199 l3_clk = clk_get(NULL, "l3_ick");
200 resp->curr_level = get_opp(l3_opps + MAX_VDD2_OPP,
201 l3_clk->rate);
202 curr_vdd2_opp = resp->curr_level;
204 return;
207 int resource_access_opp_lock(int res, int delta)
209 if (res == VDD1_OPP) {
210 vdd1_lock += delta;
211 return vdd1_lock;
212 } else if (res == VDD2_OPP) {
213 vdd2_lock += delta;
214 return vdd2_lock;
216 return -EINVAL;
219 #ifndef CONFIG_CPU_FREQ
/*
 * compute_lpj - rescale loops_per_jiffy for a frequency change.
 * @ref: current loops_per_jiffy value
 * @div: old frequency (same units as @mult)
 * @mult: new frequency
 *
 * Computes ref * mult / div while avoiding 32-bit overflow: the value is
 * split into whole div-sized units plus a remainder scaled down by 100
 * (the remainder's two least-significant decimal digits are discarded).
 */
static unsigned long compute_lpj(unsigned long ref, u_int div, u_int mult)
{
	unsigned long whole, frac;

	whole = (ref / div) * mult;
	frac = ((ref % div) / 100) * mult / div;

	return whole + frac * 100;
}
239 #endif
/*
 * program_opp_freq - program the DPLL rates for a target OPP.
 * @res: VDD1_OPP or VDD2_OPP
 * @target_level: OPP index to switch to
 * @current_level: OPP index currently in effect
 *
 * Returns the OPP level actually in effect afterwards: @target_level on
 * success, or @current_level if the VDD2 (L3) rate change failed.
 * The scratchpad semaphore is held across the change so the saved
 * scratchpad contents stay consistent with the new rates.
 */
static int program_opp_freq(int res, int target_level, int current_level)
{
	int ret = 0, l3_div;
	int *curr_opp;

	lock_scratchpad_sem();
	if (res == VDD1_OPP) {
		/* MPU and IVA/DSP DPLLs are changed together */
		curr_opp = &curr_vdd1_opp;
		clk_set_rate(dpll1_clk, mpu_opps[target_level].rate);
		clk_set_rate(dpll2_clk, dsp_opps[target_level].rate);
#ifndef CONFIG_CPU_FREQ
		/*Update loops_per_jiffy if processor speed is being changed*/
		loops_per_jiffy = compute_lpj(loops_per_jiffy,
					mpu_opps[current_level].rate/1000,
					mpu_opps[target_level].rate/1000);
#endif
	} else {
		curr_opp = &curr_vdd2_opp;
		/* L3 table stores the divided rate; scale by the L3 divider */
		l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) &
			OMAP3430_CLKSEL_L3_MASK;
		ret = clk_set_rate(dpll3_clk,
				l3_opps[target_level].rate * l3_div);
	}
	if (ret) {
		/* Rate change failed: report that we stayed where we were */
		unlock_scratchpad_sem();
		return current_level;
	}
#ifdef CONFIG_PM
	omap3_save_scratchpad_contents();
#endif
	unlock_scratchpad_sem();

	*curr_opp = target_level;
	return target_level;
}
/*
 * program_opp - perform a full OPP transition (frequency + voltage).
 * @res: VDD1_OPP or VDD2_OPP
 * @opp: OPP table for the rail (mpu_opps or l3_opps)
 * @target_level: OPP index to switch to
 * @current_level: OPP index currently in effect
 *
 * Returns the resulting OPP level from program_opp_freq(), or -EINVAL
 * if the target OPP entry has no rate/vsel populated.
 */
static int program_opp(int res, struct omap_opp *opp, int target_level,
		int current_level)
{
	int i, ret = 0, raise;
#ifdef CONFIG_OMAP_SMARTREFLEX
	unsigned long t_opp, c_opp;

	t_opp = ID_VDD(res) | ID_OPP_NO(opp[target_level].opp_id);
	c_opp = ID_VDD(res) | ID_OPP_NO(opp[current_level].opp_id);
#endif

	/* Sanity check of the OPP params before attempting to set */
	if (!opp[target_level].rate || !opp[target_level].vsel)
		return -EINVAL;

	if (target_level > current_level)
		raise = 1;
	else
		raise = 0;

	/*
	 * Ordering: when raising the OPP (raise == 1), the voltage is
	 * scaled on the first pass and the frequency on the second;
	 * when lowering, the frequency is changed first, then the voltage.
	 */
	for (i = 0; i < 2; i++) {
		if (i == raise)
			ret = program_opp_freq(res, target_level,
					current_level);
#ifdef CONFIG_OMAP_SMARTREFLEX
		else
			sr_voltagescale_vcbypass(t_opp, c_opp,
					opp[target_level].vsel,
					opp[current_level].vsel);
#endif
	}

	return ret;
}
312 int resource_set_opp_level(int res, u32 target_level, int flags)
314 unsigned long mpu_freq, mpu_old_freq;
315 #ifdef CONFIG_CPU_FREQ
316 struct cpufreq_freqs freqs_notify;
317 #endif
318 struct shared_resource *resp;
320 if (res == VDD1_OPP)
321 resp = vdd1_resp;
322 else if (res == VDD2_OPP)
323 resp = vdd2_resp;
324 else
325 return 0;
327 if (resp->curr_level == target_level)
328 return 0;
330 if (!mpu_opps || !dsp_opps || !l3_opps)
331 return 0;
333 mutex_lock(&dvfs_mutex);
335 if (res == VDD1_OPP) {
336 if (flags != OPP_IGNORE_LOCK && vdd1_lock) {
337 mutex_unlock(&dvfs_mutex);
338 return 0;
340 mpu_old_freq = mpu_opps[resp->curr_level].rate;
341 mpu_freq = mpu_opps[target_level].rate;
343 #ifdef CONFIG_CPU_FREQ
344 freqs_notify.old = mpu_old_freq/1000;
345 freqs_notify.new = mpu_freq/1000;
346 freqs_notify.cpu = 0;
347 /* Send pre notification to CPUFreq */
348 cpufreq_notify_transition(&freqs_notify, CPUFREQ_PRECHANGE);
349 #endif
350 resp->curr_level = program_opp(res, mpu_opps, target_level,
351 resp->curr_level);
352 #ifdef CONFIG_CPU_FREQ
353 /* Send a post notification to CPUFreq */
354 cpufreq_notify_transition(&freqs_notify, CPUFREQ_POSTCHANGE);
355 #endif
356 } else {
357 if (!(flags & OPP_IGNORE_LOCK) && vdd2_lock) {
358 mutex_unlock(&dvfs_mutex);
359 return 0;
361 resp->curr_level = program_opp(res, l3_opps, target_level,
362 resp->curr_level);
364 mutex_unlock(&dvfs_mutex);
365 return 0;
368 int set_opp(struct shared_resource *resp, u32 target_level)
370 unsigned long tput;
371 unsigned long req_l3_freq;
372 int ind;
374 if (resp == vdd1_resp) {
375 if (target_level < 3)
376 resource_release("vdd2_opp", &vdd2_dev);
378 resource_set_opp_level(VDD1_OPP, target_level, 0);
380 * For VDD1 OPP3 and above, make sure the interconnect
381 * is at 100Mhz or above.
382 * throughput in KiB/s for 100 Mhz = 100 * 1000 * 4.
384 if (target_level >= 3)
385 resource_request("vdd2_opp", &vdd2_dev, 400000);
387 } else if (resp == vdd2_resp) {
388 tput = target_level;
390 /* Convert the tput in KiB/s to Bus frequency in MHz */
391 req_l3_freq = (tput * 1000)/4;
393 for (ind = 2; ind <= MAX_VDD2_OPP; ind++)
394 if ((l3_opps + ind)->rate >= req_l3_freq) {
395 target_level = ind;
396 break;
399 /* Set the highest OPP possible */
400 if (ind > MAX_VDD2_OPP)
401 target_level = ind-1;
402 resource_set_opp_level(VDD2_OPP, target_level, 0);
404 return 0;
/**
 * validate_opp - Validates if valid VDD1 OPP's are passed as the
 * target_level.
 * VDD2 OPP levels are passed as L3 throughput, which are then mapped
 * to an appropriate OPP.
 *
 * Currently a no-op: every request is accepted (always returns 0).
 */
int validate_opp(struct shared_resource *resp, u32 target_level)
{
	return 0;
}
419 * init_freq - Initialize the frequency resource.
421 void init_freq(struct shared_resource *resp)
423 char *linked_res_name;
424 resp->no_of_users = 0;
426 if (!mpu_opps || !dsp_opps)
427 return;
429 linked_res_name = (char *)resp->resource_data;
430 /* Initialize the current level of the Freq resource
431 * to the frequency set by u-boot.
433 if (strcmp(resp->name, "mpu_freq") == 0)
434 /* MPU freq in Mhz */
435 resp->curr_level = mpu_opps[curr_vdd1_opp].rate;
436 else if (strcmp(resp->name, "dsp_freq") == 0)
437 /* DSP freq in Mhz */
438 resp->curr_level = dsp_opps[curr_vdd1_opp].rate;
439 return;
442 int set_freq(struct shared_resource *resp, u32 target_level)
444 unsigned int vdd1_opp;
446 if (!mpu_opps || !dsp_opps)
447 return 0;
449 if (strcmp(resp->name, "mpu_freq") == 0) {
450 vdd1_opp = get_opp(mpu_opps + MAX_VDD1_OPP, target_level);
451 resource_request("vdd1_opp", &dummy_mpu_dev, vdd1_opp);
452 } else if (strcmp(resp->name, "dsp_freq") == 0) {
453 vdd1_opp = get_opp(dsp_opps + MAX_VDD1_OPP, target_level);
454 resource_request("vdd1_opp", &dummy_dsp_dev, vdd1_opp);
456 resp->curr_level = target_level;
457 return 0;
/*
 * validate_freq - validation hook for the frequency resource.
 * Currently a no-op: every request is accepted (always returns 0).
 */
int validate_freq(struct shared_resource *resp, u32 target_level)
{
	return 0;
}