/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
/* This file has been patched with Linux PHC: www.linux-phc.org
 * Patch version: linux-phc-0.3.2
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};
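/*
 * The mask/shift values below assume the Enhanced SpeedStep encoding of the
 * low word of PERF_CTL/PERF_STATUS: the voltage id (VID) lives in bits 7:0
 * and the frequency id (FID) in bits 15:8.
 */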
#define INTEL_MSR_RANGE		(0xffff)
#define INTEL_MSR_VID_MASK	(0x00ff)
#define INTEL_MSR_FID_MASK	(0xff00)
#define INTEL_MSR_FID_SHIFT	(0x8)
#define PHC_VERSION_STRING	"0.3.2:2"
struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	acpi_integer *original_controls;
};
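/*
 * Per-CPU driver state, allocated in acpi_cpufreq_cpu_init() and torn down
 * in acpi_cpufreq_cpu_exit().
 */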
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}

	return 0;
}
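/*
 * Map a PERF_STATUS value back to a table frequency. Only the FID bits are
 * compared, so the lookup keeps working even after the PHC sysfs interface
 * below has rewritten the VID part of the control values.
 */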
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	u32 fid;
	struct acpi_processor_performance *perf;

	fid = msr & INTEL_MSR_FID_MASK;
	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (fid == (perf->states[data->freq_table[i].index].status &
			    INTEL_MSR_FID_MASK))
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}
struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct { u32 reg; } msr;
		struct { acpi_io_address port; int bit_width; } io;
	} addr;
	u32 val;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
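/*
 * drv_read()/drv_write() run the accessors above on the CPUs named in
 * cmd->mask by way of cross-CPU function calls, so the MSR or IO access
 * always happens on a CPU that the command targets.
 */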
static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}
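/*
 * Read back the raw P-state value (the PERF_STATUS MSR or the ACPI control
 * port, depending on the detected interface) for the first CPU in @mask.
 */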
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
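/*
 * Used only when the acpi_pstate_strict module parameter is set: poll the
 * hardware until the requested frequency is reported back, and fail the
 * transition if it never shows up.
 */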
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
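/*
 * Move policy->cpus to the P-state matching @target_freq: look up the
 * frequency table entry, program PERF_CTL (or the ACPI IO port) and send
 * the cpufreq pre/post change notifications.
 */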
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
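/*
 * Used when the current speed cannot be read back (the IO port case): pick
 * the table entry closest to cpu_khz and remember it as the current P-state.
 */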
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;

	static int blacklisted;

	pr_debug("acpi_cpufreq_cpu_init\n");

	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;
	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}
	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (!check_est_cpu(cpu)) {
			result = -ENODEV;
			goto err_unreg;
		}
		data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
		break;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}
	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);
	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		if (data->original_controls)
			kfree(data->original_controls);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}
/* sysfs interface to change operating points voltages */

static unsigned int extract_fid_from_control(unsigned int control)
{
	return (control & INTEL_MSR_FID_MASK) >> INTEL_MSR_FID_SHIFT;
}

static unsigned int extract_vid_from_control(unsigned int control)
{
	return control & INTEL_MSR_VID_MASK;
}
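/*
 * The PHC attributes only work on the MSR (Enhanced SpeedStep) interface;
 * on the ACPI IO-port interface there is no VID field to adjust, so the
 * sysfs handlers below refuse with -ENODEV.
 */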
static bool check_cpu_control_capability(struct acpi_cpufreq_data *data)
{
	/* check if the cpu we are running on is capable of setting new control data */
	if (unlikely(data == NULL ||
		     data->acpi_data == NULL ||
		     data->freq_table == NULL ||
		     data->cpu_feature != SYSTEM_INTEL_MSR_CAPABLE)) {
		return false;
	}
	return true;
}
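/*
 * Take a one-time backup of the ACPI-provided control values. The store
 * handlers below bound every user-supplied VID by these defaults, and the
 * phc_default_* attributes report them back.
 */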
static ssize_t check_origial_table(struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int state_index;

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	if (data->original_controls == NULL) {
		/* Backup original control values */
		data->original_controls = kcalloc(acpi_data->state_count,
						  sizeof(acpi_integer), GFP_KERNEL);
		if (data->original_controls == NULL) {
			printk("failed to allocate memory for original control values\n");
			return -ENOMEM;
		}
		for (state_index = 0; state_index < acpi_data->state_count;
		     state_index++) {
			data->original_controls[state_index] =
				acpi_data->states[state_index].control;
		}
	}

	return 0;
}
/* display phc's voltage id's */
static ssize_t show_freq_attr_vids(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int i;
	unsigned int vid;
	ssize_t count = 0;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		vid = extract_vid_from_control(
			acpi_data->states[freq_table[i].index].control);
		count += sprintf(&buf[count], "%u ", vid);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}
/* display acpi's default voltage id's */
static ssize_t show_freq_attr_default_vids(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	unsigned int i;
	unsigned int vid;
	ssize_t count = 0;
	ssize_t retval;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	retval = check_origial_table(data);
	if (retval != 0)
		return retval;

	freq_table = data->freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		vid = extract_vid_from_control(
			data->original_controls[freq_table[i].index]);
		count += sprintf(&buf[count], "%u ", vid);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}
/* display phc's frequency id's */
static ssize_t show_freq_attr_fids(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int i;
	unsigned int fid;
	ssize_t count = 0;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		fid = extract_fid_from_control(
			acpi_data->states[freq_table[i].index].control);
		count += sprintf(&buf[count], "%u ", fid);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}
/* display phc's controls for the cpu (frequency id's and related voltage id's) */
static ssize_t show_freq_attr_controls(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int i;
	unsigned int fid;
	unsigned int vid;
	ssize_t count = 0;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		fid = extract_fid_from_control(
			acpi_data->states[freq_table[i].index].control);
		vid = extract_vid_from_control(
			acpi_data->states[freq_table[i].index].control);
		count += sprintf(&buf[count], "%u:%u ", fid, vid);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}
/* display acpi's default controls for the cpu (frequency id's and related voltage id's) */
static ssize_t show_freq_attr_default_controls(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	unsigned int i;
	unsigned int fid;
	unsigned int vid;
	ssize_t count = 0;
	ssize_t retval;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	retval = check_origial_table(data);
	if (retval != 0)
		return retval;

	freq_table = data->freq_table;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		fid = extract_fid_from_control(
			data->original_controls[freq_table[i].index]);
		vid = extract_vid_from_control(
			data->original_controls[freq_table[i].index]);
		count += sprintf(&buf[count], "%u:%u ", fid, vid);
	}
	count += sprintf(&buf[count], "\n");

	return count;
}
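/*
 * phc_vids expects one VID per frequency-table entry, in table order,
 * separated by spaces or commas. Illustrative example (the values are made
 * up and CPU specific):
 *   echo "43 40 36 28" > /sys/devices/system/cpu/cpu0/cpufreq/phc_vids
 */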
/* store the voltage id's for the related frequency
 * We are going to do some sanity checks here to prevent users
 * from setting higher voltages than the default one.
 */
static ssize_t store_freq_attr_vids(struct cpufreq_policy *policy,
				    const char *buf, size_t count)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_index;
	unsigned int state_index;
	unsigned int new_vid;
	unsigned int original_vid;
	unsigned int new_control;
	unsigned int original_control;
	const char *curr_buf = buf;
	char *next_buf;
	ssize_t retval;

	if (!check_cpu_control_capability(data))
		return -ENODEV;	/* check if CPU is capable of changing controls */

	retval = check_origial_table(data);
	if (retval != 0)
		return retval;

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	/* for each entry taken from the sysfs interface (phc_vids),
	 * convert it to an unsigned long integer */
	for (freq_index = 0; freq_table[freq_index].frequency != CPUFREQ_TABLE_END;
	     freq_index++) {
		new_vid = simple_strtoul(curr_buf, &next_buf, 10);
		if (next_buf == curr_buf) {
			if ((curr_buf - buf == count - 1) && (*curr_buf == '\n')) {
				/* end of line */
				curr_buf++;
				break;
			}
			/* we did not get an end of line, but there is nothing
			 * more to read, so something went wrong */
			printk("failed to parse vid value at %i (%s)\n",
			       freq_index, curr_buf);
			return -EINVAL;
		}

		state_index = freq_table[freq_index].index;
		original_control = data->original_controls[state_index];
		original_vid = original_control & INTEL_MSR_VID_MASK;

		/* before we store the values we do some checks to prevent
		 * users from setting values higher than the default one
		 */
		if (new_vid <= original_vid) {
			new_control = (original_control & ~INTEL_MSR_VID_MASK) | new_vid;
			pr_debug("setting control at %i to %x (default is %x)\n",
				 freq_index, new_control, original_control);
			acpi_data->states[state_index].control = new_control;
		} else {
			printk("skipping vid at %i, %u is greater than default %u\n",
			       freq_index, new_vid, original_vid);
		}

		curr_buf = next_buf;
		/* jump over value separators (space or comma).
		 * There could be more than one space or comma character
		 * to separate two values so we better do it using a loop.
		 */
		while ((curr_buf - buf < count) &&
		       ((*curr_buf == ' ') || (*curr_buf == ','))) {
			curr_buf++;
		}
	}

	/* set new voltage for current frequency */
	acpi_cpufreq_target(policy, get_cur_freq_on_cpu(policy->cpu),
			    CPUFREQ_RELATION_L);

	return curr_buf - buf;
}
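/*
 * phc_controls takes "FID:VID" pairs separated by spaces or commas; each
 * pair is matched against the operating point with the same FID.
 * Illustrative example (the values are made up and CPU specific):
 *   echo "12:43 10:36 8:28" > /sys/devices/system/cpu/cpu0/cpufreq/phc_controls
 */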
/* store the controls (frequency id's and related voltage id's)
 * We are going to do some sanity checks here to prevent users
 * from setting higher voltages than the default one.
 */
static ssize_t store_freq_attr_controls(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	const char *curr_buf;
	char *next_buf;
	ssize_t retval;
	unsigned int op_count;
	unsigned int state_index;
	unsigned int new_vid;
	unsigned int original_vid;
	unsigned int new_fid;
	unsigned int old_fid;
	unsigned int original_control;
	unsigned int old_control;
	unsigned int new_control;
	int isok;
	int found;

	if (!check_cpu_control_capability(data))
		return -ENODEV;

	retval = check_origial_table(data);
	if (retval != 0)
		return retval;

	acpi_data = data->acpi_data;
	freq_table = data->freq_table;

	op_count = 0;
	curr_buf = buf;
	isok = 1;

	while ((isok) && (curr_buf != NULL)) {
		op_count++;
		/* Parse the frequency id */
		new_fid = simple_strtoul(curr_buf, &next_buf, 10);
		if ((next_buf != curr_buf) && (next_buf != NULL)) {
			/* Parse separator between frequency and voltage */
			curr_buf = next_buf;
			if (*curr_buf == ':') {
				curr_buf++;
				/* Parse the voltage id */
				new_vid = simple_strtoul(curr_buf, &next_buf, 10);
				if ((next_buf != curr_buf) && (next_buf != NULL)) {
					found = 0;
					for (state_index = 0;
					     state_index < acpi_data->state_count;
					     state_index++) {
						old_control = acpi_data->states[state_index].control;
						old_fid = extract_fid_from_control(old_control);
						if (new_fid == old_fid) {
							found = 1;
							original_control = data->original_controls[state_index];
							original_vid = extract_vid_from_control(original_control);
							if (new_vid <= original_vid) {
								new_control = (original_control & ~INTEL_MSR_VID_MASK) | new_vid;
								pr_debug("setting control at %i to %x (default is %x)\n",
									 state_index, new_control, original_control);
								acpi_data->states[state_index].control = new_control;
							} else {
								printk("skipping vid at %i, %u is greater than default %u\n",
								       state_index, new_vid, original_vid);
							}
						}
					}

					if (!found) {
						printk("operating point # %u not found (FID = %u)\n",
						       op_count, new_fid);
						isok = 0;
					}

					/* Parse separator before the next operating point, if any */
					curr_buf = next_buf;
					if ((*curr_buf == ',') || (*curr_buf == ' '))
						curr_buf++;
					else
						curr_buf = NULL;
				} else {
					printk("failed to parse VID of operating point # %u (%s)\n",
					       op_count, curr_buf);
					isok = 0;
				}
			} else {
				printk("failed to parse operating point # %u (%s)\n",
				       op_count, curr_buf);
				isok = 0;
			}
		} else {
			printk("failed to parse FID of operating point # %u (%s)\n",
			       op_count, curr_buf);
			isok = 0;
		}
	}

	if (!isok)
		return -EINVAL;

	/* set new voltage at current frequency */
	acpi_cpufreq_target(policy, get_cur_freq_on_cpu(policy->cpu),
			    CPUFREQ_RELATION_L);

	return count;
}
/* print out the phc version string set at the beginning of this file */
static ssize_t show_freq_attr_phc_version(struct cpufreq_policy *policy, char *buf)
{
	ssize_t count = 0;

	count += sprintf(&buf[count], "%s\n", PHC_VERSION_STRING);

	return count;
}
/* display phc's version string */
static struct freq_attr cpufreq_freq_attr_phc_version = {
	.attr = { .name = "phc_version", .mode = 0444 },
	.show = show_freq_attr_phc_version,
};

/* display phc's voltage id's for the cpu */
static struct freq_attr cpufreq_freq_attr_vids = {
	.attr = { .name = "phc_vids", .mode = 0644 },
	.show = show_freq_attr_vids,
	.store = store_freq_attr_vids,
};

/* display acpi's default voltage id's for the cpu */
static struct freq_attr cpufreq_freq_attr_default_vids = {
	.attr = { .name = "phc_default_vids", .mode = 0444 },
	.show = show_freq_attr_default_vids,
};

/* display phc's frequency id's for the cpu */
static struct freq_attr cpufreq_freq_attr_fids = {
	.attr = { .name = "phc_fids", .mode = 0444 },
	.show = show_freq_attr_fids,
};

/* display phc's current voltage/frequency controls for the cpu */
static struct freq_attr cpufreq_freq_attr_controls = {
	.attr = { .name = "phc_controls", .mode = 0644 },
	.show = show_freq_attr_controls,
	.store = store_freq_attr_controls,
};

/* display acpi's default voltage/frequency controls for the cpu */
static struct freq_attr cpufreq_freq_attr_default_controls = {
	.attr = { .name = "phc_default_controls", .mode = 0444 },
	.show = show_freq_attr_default_controls,
};
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_phc_version,
	&cpufreq_freq_attr_vids,
	&cpufreq_freq_attr_default_vids,
	&cpufreq_freq_attr_fids,
	&cpufreq_freq_attr_controls,
	&cpufreq_freq_attr_default_controls,
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};
static int __init acpi_cpufreq_init(void)
{
	int ret;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}
static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");