arch/blackfin/mach-common/cpufreq.c
/*
 * Blackfin core clock scaling
 *
 * Copyright 2008-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/dpmc.h>
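
/*
 * This driver scales only the core clock (CCLK): bfin_target() rewrites the
 * CSEL post-divider field of PLL_DIV while preserving SSEL, so the system
 * clock and the PLL multiplier are left untouched and a transition does not
 * involve a PLL relock.
 */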

/* this is the table of CCLK frequencies, in kHz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 1,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 2,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
};
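
/*
 * Per-operating-point register values, filled in together with
 * bfin_freq_table[] by bfin_init_tables().
 */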
static struct bfin_dpm_state {
	unsigned int csel;   /* system clock divider */
	unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
 * normalized to maximum frequency offset for CYCLES,
 * used in the time-ts cycles clock source, but could be used
 * elsewhere as well.
 */
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
#endif

/**************************************************************************/
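
/*
 * Build bfin_freq_table[] and dpm_state_table[] at init time: starting from
 * the current CCLK and CSEL setting, keep halving the frequency (and bumping
 * CSEL) while the result stays at or above the minimum allowed CCLK, which
 * is SCLK (or 2*SCLK on parts hit by the anomalies below).  Each entry also
 * records the pre-shifted CSEL bits for PLL_DIV and the matching core timer
 * TSCALE reload value.
 */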
static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
{
	unsigned long csel, min_cclk;
	int index;

	/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
	(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
	min_cclk = sclk * 2;
#else
	min_cclk = sclk;
#endif
	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);

	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
		bfin_freq_table[index].frequency = cclk >> index;
		dpm_state_table[index].csel = csel << 4; /* shift now into PLL_DIV bitpos */
		dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;

		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
			 bfin_freq_table[index].frequency,
			 dpm_state_table[index].csel,
			 dpm_state_table[index].tscale);
	}
	return;
}
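
/*
 * The core timer is clocked from CCLK, so after a frequency change its
 * prescaler must be rewritten to keep the tick period constant.  This runs
 * on every online core via on_each_cpu() from bfin_target().
 */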
static void bfin_adjust_core_timer(void *info)
{
	unsigned int tscale;
	unsigned int index = *(unsigned int *)info;

	/* we have to adjust the core timer, because it is using cclk */
	tscale = dpm_state_table[index].tscale;
	bfin_write_TSCALE(tscale);
	return;
}
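
/*
 * cpufreq ->get() hook: report the current core clock in kHz.  On dual-core
 * parts both cores run from the same CCLK, so the cpu argument is ignored.
 */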
static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
	/* Both CoreA/B have the same core clock */
	return get_cclk() / 1000;
}
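
/*
 * cpufreq ->target() hook: look up the closest table entry for target_freq,
 * then, for the CPU that owns the PLL (CPUFREQ_CPU), rewrite the CSEL field
 * of PLL_DIV (leaving SSEL alone), rescale the core timer on all cores,
 * account for the speed change in the CYCLES clocksource offset, and rescale
 * loops_per_jiffy.  PRE-/POSTCHANGE notifications are issued for every
 * online CPU.  The poli argument is not used directly; the per-CPU policies
 * are looked up inside the loop.
 */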
static int bfin_target(struct cpufreq_policy *poli,
			unsigned int target_freq, unsigned int relation)
{
	unsigned int index, plldiv, cpu;
	unsigned long flags, cclk_hz;
	struct cpufreq_freqs freqs;
	static unsigned long lpj_ref;
	static unsigned int lpj_ref_freq;

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	cycles_t cycles;
#endif

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

		if (!policy)
			continue;

		if (cpufreq_frequency_table_target(policy, bfin_freq_table,
				target_freq, relation, &index))
			return -EINVAL;

		cclk_hz = bfin_freq_table[index].frequency;

		freqs.old = bfin_getfreq_khz(0);
		freqs.new = cclk_hz;
		freqs.cpu = cpu;

		pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
			 cclk_hz, target_freq, freqs.old);

		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
		if (cpu == CPUFREQ_CPU) {
			flags = hard_local_irq_save();
			plldiv = (bfin_read_PLL_DIV() & SSEL) |
					dpm_state_table[index].csel;
			bfin_write_PLL_DIV(plldiv);
			on_each_cpu(bfin_adjust_core_timer, &index, 1);
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
			cycles = get_cycles();
			SSYNC();
			cycles += 10; /* ~10 cycles we lose after get_cycles() */
			__bfin_cycles_off +=
				(cycles << __bfin_cycles_mod) - (cycles << index);
			__bfin_cycles_mod = index;
#endif
			if (!lpj_ref_freq) {
				lpj_ref = loops_per_jiffy;
				lpj_ref_freq = freqs.old;
			}
			if (freqs.new != freqs.old) {
				loops_per_jiffy = cpufreq_scale(lpj_ref,
						lpj_ref_freq, freqs.new);
			}

			hard_local_irq_restore(flags);
		}
		/* TODO: just a test case for the cycles clock source, remove later */
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	pr_debug("cpufreq: done\n");
	return 0;
}
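
/*
 * cpufreq ->verify() hook: clamp the requested policy limits to what the
 * frequency table can actually provide.
 */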
static int bfin_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}
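
/*
 * Per-policy init: build the frequency tables once (when the policy for
 * CPUFREQ_CPU comes up), publish the current frequency and an assumed
 * 50 us transition latency, and hand the table to the cpufreq core.
 */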
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
	unsigned long cclk, sclk;

	cclk = get_cclk() / 1000;
	sclk = get_sclk() / 1000;

	if (policy->cpu == CPUFREQ_CPU)
		bfin_init_tables(cclk, sclk);

	policy->cpuinfo.transition_latency = 50000; /* 50 us assumed */

	policy->cur = cclk;
	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}
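
/* sysfs attributes: expose scaling_available_frequencies to userspace */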
static struct freq_attr *bfin_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver bfin_driver = {
	.verify = bfin_verify_speed,
	.target = bfin_target,
	.get = bfin_getfreq_khz,
	.init = __bfin_cpu_init,
	.name = "bfin cpufreq",
	.owner = THIS_MODULE,
	.attr = bfin_freq_attr,
};
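
/* Module plumbing: register/unregister the driver with the cpufreq core. */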
static int __init bfin_cpu_init(void)
{
	return cpufreq_register_driver(&bfin_driver);
}

static void __exit bfin_cpu_exit(void)
{
	cpufreq_unregister_driver(&bfin_driver);
}

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");

module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);