OMAP3: PM: Added resource refresh to OPP unlock requests
arch/arm/mach-omap2/resource34xx.c (linux-ginger.git)
/*
 * linux/arch/arm/mach-omap2/resource34xx.c
 * OMAP3 resource init/change_level/validate_level functions
 *
 * Copyright (C) 2007-2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * History:
 *
 */
#include <linux/pm_qos_params.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
#include <plat/omap34xx.h>

#include "smartreflex.h"
#include "resource34xx.h"
#include "pm.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
/**
 * init_latency - Initializes the mpu/core latency resource.
 * @resp: Latency resource to be initialized
 *
 * No return value.
 */
void init_latency(struct shared_resource *resp)
{
	resp->no_of_users = 0;
	resp->curr_level = RES_DEFAULTLEVEL;
	/* resource_data holds the "pm_qos requirement added" flag */
	*((u8 *)resp->resource_data) = 0;
	return;
}
/**
 * set_latency - Adds/updates or removes a CPU_DMA_LATENCY requirement
 * in pm_qos_params.
 * @resp: resource pointer
 * @latency: target latency to be set
 *
 * Returns 0 on success, or error values as returned by
 * pm_qos_update_requirement/pm_qos_add_requirement.
 */
int set_latency(struct shared_resource *resp, u32 latency)
{
	u8 *pm_qos_req_added;

	if (resp->curr_level == latency)
		return 0;
	else
		/* Update the resource's current level */
		resp->curr_level = latency;

	pm_qos_req_added = resp->resource_data;
	if (latency == RES_DEFAULTLEVEL) {
		/* No more users left, remove the pm_qos_req if present */
		if (*pm_qos_req_added) {
			pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
						  resp->name);
			*pm_qos_req_added = 0;
			return 0;
		}
	}

	if (*pm_qos_req_added) {
		return pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
						 resp->name, latency);
	} else {
		*pm_qos_req_added = 1;
		return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
					      resp->name, latency);
	}
}
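
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver with a latency-sensitive DMA transfer could hold the latency
 * resource around its critical section; set_latency() then maintains
 * the matching CPU_DMA_LATENCY pm_qos requirement. The resource name
 * and device pointer below are assumptions for the example; levels are
 * in microseconds.
 *
 *	resource_request("core_latency", &my_dev, 10);
 *	... perform the latency-sensitive transfer ...
 *	resource_release("core_latency", &my_dev);
 */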
/**
 * init_pd_latency - Initializes the power domain latency resource.
 * @resp: Power Domain Latency resource to be initialized.
 *
 * No return value.
 */
void init_pd_latency(struct shared_resource *resp)
{
	struct pd_latency_db *pd_lat_db;

	resp->no_of_users = 0;
	if (enable_off_mode)
		resp->curr_level = PD_LATENCY_OFF;
	else
		resp->curr_level = PD_LATENCY_RET;
	pd_lat_db = resp->resource_data;
	/* Populate the power domain associated with the latency resource */
	pd_lat_db->pd = pwrdm_lookup(pd_lat_db->pwrdm_name);
	set_pwrdm_state(pd_lat_db->pd, resp->curr_level);
	return;
}
/**
 * set_pd_latency - Updates the curr_level of the power domain resource.
 * @resp: Power domain latency resource.
 * @latency: New latency value acceptable.
 *
 * This function maps the latency in microsecs to the acceptable
 * Power domain state using the latency DB.
 * It then programs the power domain to enter the target state.
 * Always returns 0.
 */
int set_pd_latency(struct shared_resource *resp, u32 latency)
{
	u32 pd_lat_level, ind;
	struct pd_latency_db *pd_lat_db;
	struct powerdomain *pwrdm;

	pd_lat_db = resp->resource_data;
	pwrdm = pd_lat_db->pd;
	pd_lat_level = PD_LATENCY_OFF;
	/* using the latency db map to the appropriate PD state */
	for (ind = 0; ind < PD_LATENCY_MAXLEVEL; ind++) {
		if (pd_lat_db->latency[ind] < latency) {
			pd_lat_level = ind;
			break;
		}
	}

	if (!enable_off_mode && pd_lat_level == PD_LATENCY_OFF)
		pd_lat_level = PD_LATENCY_RET;

	resp->curr_level = pd_lat_level;
	set_pwrdm_state(pwrdm, pd_lat_level);
	return 0;
}
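
/*
 * Worked example (illustrative; the DB values below are assumptions):
 * with a latency DB of {OFF: 10000us, RET: 100us, ...}, a request for
 * 500us skips OFF (10000 is not < 500) and matches RET (100 < 500), so
 * the domain is limited to retention. If enable_off_mode is clear, an
 * OFF result is likewise demoted to RET before programming the domain.
 */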
static struct shared_resource *vdd1_resp;
static struct shared_resource *vdd2_resp;
static struct device dummy_mpu_dev;
static struct device dummy_dsp_dev;
static struct device vdd2_dev;
static int vdd1_lock;
static int vdd2_lock;
static struct clk *dpll1_clk, *dpll2_clk, *dpll3_clk;
static int curr_vdd1_opp;
static int curr_vdd2_opp;
static DEFINE_MUTEX(dvfs_mutex);
/*
 * get_opp - Maps a frequency to an OPP id. @opp_freq_table points at
 * the highest OPP entry; the table is walked downwards to find the
 * lowest OPP whose rate meets or exceeds @freq.
 */
static unsigned short get_opp(struct omap_opp *opp_freq_table,
		unsigned long freq)
{
	struct omap_opp *prcm_config;
	prcm_config = opp_freq_table;

	if (prcm_config->rate <= freq)
		return prcm_config->opp_id; /* Return the Highest OPP */
	for (; prcm_config->rate; prcm_config--)
		if (prcm_config->rate < freq)
			return (prcm_config+1)->opp_id;
		else if (prcm_config->rate == freq)
			return prcm_config->opp_id;
	/* Return the least OPP */
	return (prcm_config+1)->opp_id;
}
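
/*
 * Worked example (illustrative; the table rates are assumptions): for
 * an MPU table of {OPP1: 125 MHz, OPP2: 250 MHz, OPP3: 500 MHz} passed
 * in at its OPP3 entry, a request for 300 MHz walks down to the OPP2
 * entry (250 MHz < 300 MHz) and returns the entry above it, OPP3: the
 * lowest OPP that can still deliver the requested rate.
 */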
/**
 * init_opp - Initialize the OPP resource
 * @resp: OPP resource to be initialized
 */
void init_opp(struct shared_resource *resp)
{
	resp->no_of_users = 0;

	if (!mpu_opps || !dsp_opps || !l3_opps)
		return;

	/* Initialize the current level of the OPP resource
	 * to the opp set by u-boot.
	 */
	if (strcmp(resp->name, "vdd1_opp") == 0) {
		vdd1_resp = resp;
		dpll1_clk = clk_get(NULL, "dpll1_ck");
		dpll2_clk = clk_get(NULL, "dpll2_ck");
		resp->curr_level = get_opp(mpu_opps + MAX_VDD1_OPP,
				dpll1_clk->rate);
		curr_vdd1_opp = resp->curr_level;
	} else if (strcmp(resp->name, "vdd2_opp") == 0) {
		vdd2_resp = resp;
		dpll3_clk = clk_get(NULL, "dpll3_m2_ck");
		/* The initial VDD2 level is derived from the L3 (DPLL3) rate */
		resp->curr_level = get_opp(l3_opps + MAX_VDD2_OPP,
				dpll3_clk->rate);
		curr_vdd2_opp = resp->curr_level;
	}
	return;
}
/*
 * resource_access_opp_lock - Adjusts the VDD1/VDD2 OPP lock count by
 * @delta and returns the new count, or -EINVAL for an unknown resource.
 */
int resource_access_opp_lock(int res, int delta)
{
	if (res == VDD1_OPP) {
		vdd1_lock += delta;
		return vdd1_lock;
	} else if (res == VDD2_OPP) {
		vdd2_lock += delta;
		return vdd2_lock;
	}
	return -EINVAL;
}
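
/*
 * Usage sketch (illustrative only): a caller that must pin the current
 * VDD1 OPP, e.g. around a voltage-sensitive operation, bumps the lock
 * count and drops it afterwards. While the count is non-zero,
 * resource_set_opp_level() below refuses OPP changes unless
 * OPP_IGNORE_LOCK is passed in its flags.
 *
 *	resource_access_opp_lock(VDD1_OPP, 1);
 *	... OPP-sensitive work ...
 *	resource_access_opp_lock(VDD1_OPP, -1);
 */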
#ifndef CONFIG_CPU_FREQ
static unsigned long compute_lpj(unsigned long ref, u_int div, u_int mult)
{
	unsigned long new_jiffy_l, new_jiffy_h;

	/*
	 * Recalculate loops_per_jiffy. We do it this way to
	 * avoid math overflow on 32-bit machines. Maybe we
	 * should make this architecture dependent? If you have
	 * a better way of doing this, please replace!
	 *
	 * new = old * mult / div
	 */
	new_jiffy_h = ref / div;
	new_jiffy_l = (ref % div) / 100;
	new_jiffy_h *= mult;
	new_jiffy_l = new_jiffy_l * mult / div;

	return new_jiffy_h + new_jiffy_l * 100;
}
#endif
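
/*
 * Worked example (illustrative numbers): halving the MPU clock from
 * 500 MHz to 250 MHz (div = 500000, mult = 250000 after the /1000 in
 * the caller) with ref = 4997120 gives
 *	new_jiffy_h = (4997120 / 500000) * 250000          = 2250000
 *	new_jiffy_l = ((4997120 % 500000) / 100)
 *			* 250000 / 500000                  = 2485
 *	result      = 2250000 + 2485 * 100                 = 2498500
 * within ~0.01% of the exact 2498560, while ref * mult (~1.25e12)
 * would have overflowed a 32-bit multiply.
 */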
static int program_opp_freq(int res, int target_level, int current_level)
{
	int ret = 0, l3_div;
	int *curr_opp;

	if (res == VDD1_OPP) {
		curr_opp = &curr_vdd1_opp;
		clk_set_rate(dpll1_clk, mpu_opps[target_level].rate);
		clk_set_rate(dpll2_clk, dsp_opps[target_level].rate);
#ifndef CONFIG_CPU_FREQ
		/* Update loops_per_jiffy if processor speed is being changed */
		loops_per_jiffy = compute_lpj(loops_per_jiffy,
				mpu_opps[current_level].rate/1000,
				mpu_opps[target_level].rate/1000);
#endif
	} else {
		curr_opp = &curr_vdd2_opp;
		l3_div = cm_read_mod_reg(CORE_MOD, CM_CLKSEL) &
				OMAP3430_CLKSEL_L3_MASK;
		ret = clk_set_rate(dpll3_clk,
				l3_opps[target_level].rate * l3_div);
	}
	if (ret)
		return current_level;
#ifdef CONFIG_PM
	omap3_save_scratchpad_contents();
#endif
	*curr_opp = target_level;
	return target_level;
}
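
/*
 * Worked example (illustrative numbers): dpll3_clk is dpll3_m2_ck,
 * which runs at l3_div times the L3 rate, so the rate passed to
 * clk_set_rate() is scaled up by the CLKSEL_L3 divider. With a target
 * L3 rate of 166 MHz and a divider of 2 read from CM_CLKSEL, the code
 * programs dpll3_m2_ck to 332 MHz, which the divider brings back down
 * to 166 MHz on the L3 interconnect.
 */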
static int program_opp(int res, struct omap_opp *opp, int target_level,
		int current_level)
{
	int i, ret = 0, raise;
#ifdef CONFIG_OMAP_SMARTREFLEX
	unsigned long t_opp;

	t_opp = ID_VDD(res) | ID_OPP_NO(opp[target_level].opp_id);
#endif
	if (target_level > current_level)
		raise = 1;
	else
		raise = 0;

	/*
	 * Sequence voltage and frequency changes safely: when raising
	 * the OPP, scale the voltage up before the frequency; when
	 * lowering it, drop the frequency before the voltage.
	 */
	for (i = 0; i < 2; i++) {
		if (i == raise)
			ret = program_opp_freq(res, target_level,
					current_level);
#ifdef CONFIG_OMAP_SMARTREFLEX
		else
			sr_voltagescale_vcbypass(t_opp,
					opp[target_level].vsel);
#endif
	}

	return ret;
}
int resource_set_opp_level(int res, u32 target_level, int flags)
{
	unsigned long mpu_freq, mpu_old_freq;
#ifdef CONFIG_CPU_FREQ
	struct cpufreq_freqs freqs_notify;
#endif
	struct shared_resource *resp;

	if (res == VDD1_OPP)
		resp = vdd1_resp;
	else if (res == VDD2_OPP)
		resp = vdd2_resp;
	else
		return 0;

	if (resp->curr_level == target_level)
		return 0;

	if (!mpu_opps || !dsp_opps || !l3_opps)
		return 0;

	mutex_lock(&dvfs_mutex);

	if (res == VDD1_OPP) {
		if (!(flags & OPP_IGNORE_LOCK) && vdd1_lock) {
			mutex_unlock(&dvfs_mutex);
			return 0;
		}
		mpu_old_freq = mpu_opps[resp->curr_level].rate;
		mpu_freq = mpu_opps[target_level].rate;

#ifdef CONFIG_CPU_FREQ
		freqs_notify.old = mpu_old_freq/1000;
		freqs_notify.new = mpu_freq/1000;
		freqs_notify.cpu = 0;
		/* Send pre notification to CPUFreq */
		cpufreq_notify_transition(&freqs_notify, CPUFREQ_PRECHANGE);
#endif
		resp->curr_level = program_opp(res, mpu_opps, target_level,
				resp->curr_level);
#ifdef CONFIG_CPU_FREQ
		/* Send a post notification to CPUFreq */
		cpufreq_notify_transition(&freqs_notify, CPUFREQ_POSTCHANGE);
#endif
	} else {
		if (!(flags & OPP_IGNORE_LOCK) && vdd2_lock) {
			mutex_unlock(&dvfs_mutex);
			return 0;
		}
		resp->curr_level = program_opp(res, l3_opps, target_level,
				resp->curr_level);
	}
	mutex_unlock(&dvfs_mutex);
	return 0;
}
int set_opp(struct shared_resource *resp, u32 target_level)
{
	unsigned long tput;
	unsigned long req_l3_freq;
	int ind;

	if (resp == vdd1_resp) {
		resource_set_opp_level(VDD1_OPP, target_level, 0);
		/*
		 * For VDD1 OPP3 and above, make sure the interconnect
		 * is at 100 MHz or above.
		 * Throughput in KiB/s for 100 MHz = 100 * 1000 * 4.
		 */
		if (target_level >= 3)
			resource_request("vdd2_opp", &vdd2_dev, 400000);
		else
			resource_release("vdd2_opp", &vdd2_dev);

	} else if (resp == vdd2_resp) {
		tput = target_level;

		/* Convert the tput in KiB/s to bus frequency in Hz */
		req_l3_freq = (tput * 1000)/4;

		for (ind = 2; ind <= MAX_VDD2_OPP; ind++)
			if ((l3_opps + ind)->rate >= req_l3_freq) {
				target_level = ind;
				break;
			}

		/* Set the highest OPP possible */
		if (ind > MAX_VDD2_OPP)
			target_level = ind-1;
		resource_set_opp_level(VDD2_OPP, target_level, 0);
	}
	return 0;
}
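
/*
 * Usage sketch (illustrative only; the device pointer is an assumption):
 * a driver needing ~200 MB/s of L3 bandwidth requests 200000 KiB/s on
 * "vdd2_opp". set_opp() converts that to 200000 * 1000 / 4 = 50 MHz on
 * the 4-byte-wide bus and picks the lowest L3 OPP at or above it.
 *
 *	resource_request("vdd2_opp", &my_dev, 200000);
 *	...
 *	resource_release("vdd2_opp", &my_dev);
 */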
/**
 * validate_opp - Validates whether valid VDD1 OPPs are passed as the
 * target_level.
 * VDD2 OPP levels are passed as L3 throughput, which is then mapped
 * to an appropriate OPP.
 */
int validate_opp(struct shared_resource *resp, u32 target_level)
{
	return 0;
}
/**
 * init_freq - Initialize the frequency resource.
 * @resp: Frequency resource to be initialized.
 */
void init_freq(struct shared_resource *resp)
{
	char *linked_res_name;
	resp->no_of_users = 0;

	if (!mpu_opps || !dsp_opps)
		return;

	linked_res_name = (char *)resp->resource_data;
	/* Initialize the current level of the Freq resource
	 * to the frequency set by u-boot.
	 */
	if (strcmp(resp->name, "mpu_freq") == 0)
		/* MPU freq in Hz */
		resp->curr_level = mpu_opps[curr_vdd1_opp].rate;
	else if (strcmp(resp->name, "dsp_freq") == 0)
		/* DSP freq in Hz */
		resp->curr_level = dsp_opps[curr_vdd1_opp].rate;
	return;
}
/*
 * set_freq - Maps an mpu/dsp frequency request to a VDD1 OPP request.
 */
int set_freq(struct shared_resource *resp, u32 target_level)
{
	unsigned int vdd1_opp;

	if (!mpu_opps || !dsp_opps)
		return 0;

	if (strcmp(resp->name, "mpu_freq") == 0) {
		vdd1_opp = get_opp(mpu_opps + MAX_VDD1_OPP, target_level);
		resource_request("vdd1_opp", &dummy_mpu_dev, vdd1_opp);
	} else if (strcmp(resp->name, "dsp_freq") == 0) {
		vdd1_opp = get_opp(dsp_opps + MAX_VDD1_OPP, target_level);
		resource_request("vdd1_opp", &dummy_dsp_dev, vdd1_opp);
	}
	resp->curr_level = target_level;
	return 0;
}
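
/*
 * Usage sketch (illustrative only; the rate is an assumed example): a
 * client that needs the DSP clocked at 360 MHz or better requests that
 * rate in Hz on "dsp_freq"; set_freq() maps it to the matching VDD1 OPP
 * via get_opp() and files the request on behalf of dummy_dsp_dev.
 *
 *	resource_request("dsp_freq", &my_dev, 360000000);
 */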
int validate_freq(struct shared_resource *resp, u32 target_level)
{
	return 0;
}