linux-2.6/verdex.git: arch/sparc64/kernel/us2e_cpufreq.c

/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 *
 * Many thanks to Dominik Brodowski for fixing up the cpufreq
 * infrastructure in order to make this driver easier to implement.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/asi.h>
#include <asm/timer.h>

static struct cpufreq_driver *cpufreq_us2e_driver;

struct us2e_freq_percpu_info {
        struct cpufreq_frequency_table table[6];
};

/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;

#define HBIRD_MEM_CNTL0_ADDR    0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR   0x1fe0000f080UL

/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
 * in the ESTAR mode control register.
 */
#define ESTAR_MODE_DIV_1        0x0000000000000000UL
#define ESTAR_MODE_DIV_2        0x0000000000000001UL
#define ESTAR_MODE_DIV_4        0x0000000000000003UL
#define ESTAR_MODE_DIV_6        0x0000000000000002UL
#define ESTAR_MODE_DIV_8        0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK     0x0000000000000007UL

#define MCTRL0_SREFRESH_ENAB    0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK  0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT 8
#define MCTRL0_REFR_INTERVAL    7800
#define MCTRL0_REFR_CLKS_P_CNT  64
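
/* Editor's note (illustrative, not part of the original source): the MCTRL0
 * refresh constants appear to encode a DRAM refresh interval of 7800 ns,
 * counted in units of 64 CPU clocks.  frob_mem_refresh() below recomputes
 * the refresh count for the newly divided clock rate.  Assuming a 500 MHz
 * clock_tick (in Hz) and divisor 1, for example:
 *
 *      refr_count = (500000000 * 7800) / (64 * 1 * 1000000000) = 60
 *
 * which fits within MCTRL0_REFR_COUNT_MASK (a 7-bit field, maximum 127).
 */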

static unsigned long read_hbreg(unsigned long addr)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
        return ret;
}

static void write_hbreg(unsigned long addr, unsigned long val)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
                             : "memory");
        if (addr == HBIRD_ESTAR_MODE_ADDR) {
                /* Need to wait 16 clock cycles for the PLL to lock. */
                udelay(1);
        }
}

static void self_refresh_ctl(int enable)
{
        unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

        if (enable)
                mctrl |= MCTRL0_SREFRESH_ENAB;
        else
                mctrl &= ~MCTRL0_SREFRESH_ENAB;
        write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
        (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}

static void frob_mem_refresh(int cpu_slowing_down,
                             unsigned long clock_tick,
                             unsigned long old_divisor, unsigned long divisor)
{
        unsigned long old_refr_count, refr_count, mctrl;

        refr_count  = (clock_tick * MCTRL0_REFR_INTERVAL);
        refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

        mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
        old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
                >> MCTRL0_REFR_COUNT_SHIFT;

        mctrl &= ~MCTRL0_REFR_COUNT_MASK;
        mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
        write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
        mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

        if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
                unsigned long usecs;

                /* We have to wait for both refresh counts (old
                 * and new) to go to zero.
                 */
                usecs = (MCTRL0_REFR_CLKS_P_CNT *
                         (refr_count + old_refr_count) *
                         1000000UL *
                         old_divisor) / clock_tick;
                udelay(usecs + 1UL);
        }
}
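
/* Editor's note (illustrative, not part of the original source): when the
 * cpu is slowing down and self-refresh is not yet enabled, the udelay()
 * above gives both the old and the new refresh counts time to expire at
 * the old clock rate.  Continuing the 500 MHz, divisor-1 example where
 * refr_count == old_refr_count == 60:
 *
 *      usecs = (64 * (60 + 60) * 1000000 * 1) / 500000000 = 15
 *
 * so the driver busy-waits roughly 16 microseconds (usecs + 1).
 */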

static void us2e_transition(unsigned long estar, unsigned long new_bits,
                            unsigned long clock_tick,
                            unsigned long old_divisor, unsigned long divisor)
{
        unsigned long flags;

        local_irq_save(flags);

        estar &= ~ESTAR_MODE_DIV_MASK;

        /* This is based upon the state transition diagram in the
         * UltraSPARC-IIe manual.
         */
        if (old_divisor == 2 && divisor == 1) {
                self_refresh_ctl(0);
                write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
                frob_mem_refresh(0, clock_tick, old_divisor, divisor);
        } else if (old_divisor == 1 && divisor == 2) {
                frob_mem_refresh(1, clock_tick, old_divisor, divisor);
                write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
                self_refresh_ctl(1);
        } else if (old_divisor == 1 && divisor > 2) {
                us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
                                1, 2);
                us2e_transition(estar, new_bits, clock_tick,
                                2, divisor);
        } else if (old_divisor > 2 && divisor == 1) {
                us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
                                old_divisor, 2);
                us2e_transition(estar, new_bits, clock_tick,
                                2, divisor);
        } else if (old_divisor < divisor) {
                frob_mem_refresh(0, clock_tick, old_divisor, divisor);
                write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
        } else if (old_divisor > divisor) {
                write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
                frob_mem_refresh(1, clock_tick, old_divisor, divisor);
        } else {
                BUG();
        }

        local_irq_restore(flags);
}
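
/* Editor's summary of the transitions above (derived from the code, not
 * from the manual): moving between divide-by-1 and divide-by-2 toggles
 * memory self-refresh around the divider change, while any transition
 * involving a divisor greater than 2 is split into two steps that pass
 * through divide-by-2; plain slow-downs reprogram the refresh count before
 * the divider is written, and plain speed-ups reprogram it afterwards.
 */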

static unsigned long index_to_estar_mode(unsigned int index)
{
        switch (index) {
        case 0:
                return ESTAR_MODE_DIV_1;

        case 1:
                return ESTAR_MODE_DIV_2;

        case 2:
                return ESTAR_MODE_DIV_4;

        case 3:
                return ESTAR_MODE_DIV_6;

        case 4:
                return ESTAR_MODE_DIV_8;

        default:
                BUG();
        }
}

static unsigned long index_to_divisor(unsigned int index)
{
        switch (index) {
        case 0:
                return 1;

        case 1:
                return 2;

        case 2:
                return 4;

        case 3:
                return 6;

        case 4:
                return 8;

        default:
                BUG();
        }
}

static unsigned long estar_to_divisor(unsigned long estar)
{
        unsigned long ret;

        switch (estar & ESTAR_MODE_DIV_MASK) {
        case ESTAR_MODE_DIV_1:
                ret = 1;
                break;
        case ESTAR_MODE_DIV_2:
                ret = 2;
                break;
        case ESTAR_MODE_DIV_4:
                ret = 4;
                break;
        case ESTAR_MODE_DIV_6:
                ret = 6;
                break;
        case ESTAR_MODE_DIV_8:
                ret = 8;
                break;
        default:
                BUG();
        }

        return ret;
}
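
/* Editor's note (not part of the original source): us2e_set_cpu_divider_index()
 * temporarily binds the calling task to the target cpu with
 * set_cpus_allowed() so that the ESTAR and memory-control register accesses
 * are issued from the cpu being reconfigured, then restores the saved
 * cpumask.  The cpufreq PRECHANGE/POSTCHANGE notifications bracket the
 * actual divider transition.
 */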

static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
{
        unsigned long new_bits, new_freq;
        unsigned long clock_tick, divisor, old_divisor, estar;
        cpumask_t cpus_allowed;
        struct cpufreq_freqs freqs;

        if (!cpu_online(cpu))
                return;

        cpus_allowed = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(cpu));

        new_freq = clock_tick = sparc64_get_clock_tick(cpu);
        new_bits = index_to_estar_mode(index);
        divisor = index_to_divisor(index);
        new_freq /= divisor;

        estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

        old_divisor = estar_to_divisor(estar);

        freqs.old = clock_tick / old_divisor;
        freqs.new = new_freq;
        freqs.cpu = cpu;
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

        if (old_divisor != divisor)
                us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);

        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        set_cpus_allowed(current, cpus_allowed);
}

static int us2e_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int new_index = 0;

        if (cpufreq_frequency_table_target(policy,
                                           &us2e_freq_table[policy->cpu].table[0],
                                           target_freq,
                                           relation,
                                           &new_index))
                return -EINVAL;

        us2e_set_cpu_divider_index(policy->cpu, new_index);

        return 0;
}

static int us2e_freq_verify(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy,
                                              &us2e_freq_table[policy->cpu].table[0]);
}

static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int cpu = policy->cpu;
        unsigned long clock_tick = sparc64_get_clock_tick(cpu);
        struct cpufreq_frequency_table *table =
                &us2e_freq_table[cpu].table[0];

        table[0].index = 0;
        table[0].frequency = clock_tick / 1;
        table[1].index = 1;
        table[1].frequency = clock_tick / 2;
        table[2].index = 2;
        table[2].frequency = clock_tick / 4;
        table[3].index = 3;
        table[3].frequency = clock_tick / 6;
        table[4].index = 4;
        table[4].frequency = clock_tick / 8;
        table[5].index = 5;
        table[5].frequency = CPUFREQ_TABLE_END;

        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        policy->cpuinfo.transition_latency = 0;
        policy->cur = clock_tick;

        return cpufreq_frequency_table_cpuinfo(policy, table);
}
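
/* Editor's note (not part of the original source): the per-cpu table holds
 * one entry for each supported divider (1, 2, 4, 6, 8) plus the
 * CPUFREQ_TABLE_END sentinel, which is why us2e_freq_percpu_info declares
 * table[6].  cpufreq_frequency_table_cpuinfo() derives the policy's
 * cpuinfo min/max frequency limits from these entries.
 */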

static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
        if (cpufreq_us2e_driver)
                us2e_set_cpu_divider_index(policy->cpu, 0);

        return 0;
}

static int __init us2e_freq_init(void)
{
        unsigned long manuf, impl, ver;
        int ret;
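
        /* Editor's note (not part of the original source): the privileged
         * %ver register encodes the manufacturer in bits 63:48 and the
         * implementation in bits 47:32; the pair 0x17/0x13 checked below
         * identifies the UltraSPARC-IIe (Hummingbird).
         */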

        __asm__("rdpr %%ver, %0" : "=r" (ver));
        manuf = ((ver >> 48) & 0xffff);
        impl = ((ver >> 32) & 0xffff);

        if (manuf == 0x17 && impl == 0x13) {
                struct cpufreq_driver *driver;

                ret = -ENOMEM;
                driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
                if (!driver)
                        goto err_out;
                memset(driver, 0, sizeof(*driver));

                us2e_freq_table = kmalloc(
                        (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
                        GFP_KERNEL);
                if (!us2e_freq_table)
                        goto err_out;
                memset(us2e_freq_table, 0,
                       (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));

                driver->verify = us2e_freq_verify;
                driver->target = us2e_freq_target;
                driver->init = us2e_freq_cpu_init;
                driver->exit = us2e_freq_cpu_exit;
                driver->owner = THIS_MODULE;
                strcpy(driver->name, "UltraSPARC-IIe");

                cpufreq_us2e_driver = driver;
                ret = cpufreq_register_driver(driver);
                if (ret)
                        goto err_out;

                return 0;

err_out:
                if (driver) {
                        kfree(driver);
                        cpufreq_us2e_driver = NULL;
                }
                if (us2e_freq_table) {
                        kfree(us2e_freq_table);
                        us2e_freq_table = NULL;
                }
                return ret;
        }

        return -ENODEV;
}

static void __exit us2e_freq_exit(void)
{
        if (cpufreq_us2e_driver) {
                cpufreq_unregister_driver(cpufreq_us2e_driver);

                kfree(cpufreq_us2e_driver);
                cpufreq_us2e_driver = NULL;
                kfree(us2e_freq_table);
                us2e_freq_table = NULL;
        }
}

MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");

module_init(us2e_freq_init);
module_exit(us2e_freq_exit);