/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define SAMPLE_COUNT		3

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
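
/*
 * Worked example of the fixed-point helpers above (our annotation,
 * assuming FRAC_BITS == 8 as defined): int_tofp(3) == 768 and
 * fp_toint(768) == 3. mul_fp() widens to 64 bits before shifting back,
 * so mul_fp(int_tofp(3), int_tofp(2)) == (768 * 512) >> 8 == 1536 ==
 * int_tofp(6); div_fp() pre-shifts the dividend instead, so
 * div_fp(int_tofp(1), int_tofp(2)) == (256 << 8) / 512 == 128, i.e. 0.5.
 */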
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	unsigned long long tsc;
	int freq;
};
struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int turbo_pstate;
};

struct vid_data {
	int32_t min;
	int32_t max;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	char name[64];

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 prev_aperf;
	u64 prev_mperf;
	unsigned long long prev_tsc;
	int sample_ptr;
	struct sample samples[SAMPLE_COUNT];
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = setpoint - busy;
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

	return (signed int)fp_toint(result);
}
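
/*
 * Worked example of pid_calc() (our annotation, using the core defaults
 * set below: setpoint 97, p_gain_pct 20, i/d gains 0, deadband 0). For a
 * scaled busy value of int_tofp(90): fp_error == int_tofp(7) == 1792;
 * p_gain == div_fp(int_tofp(20), int_tofp(100)) == 51 (about 0.2), so
 * pterm == mul_fp(51, 1792) == 357 and fp_toint(357) == 1. A positive
 * control output lowers the pstate and a negative one raises it (see
 * intel_pstate_adjust_busy_pstate() below), so a core running under the
 * setpoint is stepped down by one pstate.
 */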
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid,
		pid_params.setpoint,
		100,
		pid_params.deadband,
		0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				debugfs_parent, pid_files[i].value,
				&fops_pid_param);
		i++;
	}
}
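
/*
 * Usage sketch (our annotation, assuming debugfs is mounted in the usual
 * place): the PID parameters registered above appear as
 *
 *	/sys/kernel/debug/pstate_snb/{setpoint,deadband,sample_rate_ms,
 *	p_gain_pct,i_gain_pct,d_gain_pct}
 *
 * e.g. "echo 15 > /sys/kernel/debug/pstate_snb/p_gain_pct". Every write
 * goes through pid_param_set(), which resets each online cpu's PID via
 * intel_pstate_reset_all_pid() so new gains take effect immediately.
 */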
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.no_turbo = clamp_t(int, input, 0, 1);

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static struct kobject *intel_pstate_kobject;
static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
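
/*
 * Usage sketch (our annotation; paths follow from the kobject created
 * above under the cpu subsystem):
 *
 *	echo 1  > /sys/devices/system/cpu/intel_pstate/no_turbo
 *	echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	echo 20 > /sys/devices/system/cpu/intel_pstate/min_perf_pct
 *
 * Each store clamps its input and rescales limits.max_perf/min_perf, so
 * the per-cpu sample timers pick up the new bounds on their next pass.
 */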
static int byt_get_min_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0xFF;
}

static int byt_get_max_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0xFF;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
				cpudata->pstate.min_pstate));
}
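
/*
 * Worked example of the VID interpolation (our annotation, with made-up
 * register values): if BYT_VIDS gives vid.min == int_tofp(30) and
 * vid.max == int_tofp(70) while the pstates span 5..25, then vid.ratio ==
 * int_tofp(2), i.e. two VID steps per pstate. byt_set_pstate() then maps
 * e.g. pstate 15 to a VID of 30 + (15 - 5) * 2 == 50, clamped back into
 * [vid.min, vid.max] before being merged into the PERF_CTL write.
 */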
static int core_get_min_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;
	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = ((value) & 255);
	if (ret <= nont)
		ret = nont;
	return ret;
}
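
/*
 * Our annotation: bits 7:0 of MSR_NHM_TURBO_RATIO_LIMIT hold the maximum
 * 1-core turbo ratio. The guard above handles parts where that field
 * reads lower than the non-turbo maximum (e.g. turbo fused off) by
 * falling back to core_get_max_pstate(), so the reported turbo ceiling
 * is never below the guaranteed pstate.
 */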
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_max_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}
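
/*
 * Worked example (our annotation): with min_pstate 8, max_pstate 24,
 * turbo_pstate 32, limits.max_perf == 0.8 and limits.min_perf == 0.25,
 * turbo allowed gives *max == fp_toint(int_tofp(32) * 0.8) == 25 and
 * *min == 8, both already inside [8, 32]. With limits.no_turbo set,
 * max_perf drops to 24, so *max == 19 and the raw *min of 6 is clamped
 * back up to min_pstate == 8.
 */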
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	/*
	 * Go to the max pstate so we don't slow down boot if we are built-in;
	 * if we are a module, we will take care of it during normal operation.
	 */
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					struct sample *sample)
{
	u64 core_pct;
	u64 c0_pct;

	core_pct = div64_u64(sample->aperf * 100, sample->mperf);

	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate),
			int_tofp(core_pct * 1000)));

	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
}
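
/*
 * Worked example (our annotation): suppose over one sample period
 * aperf == 8,000,000, mperf == 10,000,000 and tsc == 20,000,000. Then
 * core_pct == 80 (while unhalted, the core averaged 80% of its guaranteed
 * frequency) and c0_pct == 50 (it was unhalted for half the wall time),
 * so core_pct_busy ~= 80 * 51/100 ~= 40.8%, and with max_pstate == 24
 * sample->freq == 24 * 80 * 1000 == 1,920,000 kHz, i.e. about 1.9 GHz.
 */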
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long long tsc;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();

	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
	cpu->samples[cpu->sample_ptr].aperf = aperf;
	cpu->samples[cpu->sample_ptr].mperf = mperf;
	cpu->samples[cpu->sample_ptr].tsc = tsc;
	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate;

	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}
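
/*
 * Worked example (our annotation): the scaling normalizes busyness to the
 * max non-turbo pstate so the PID setpoint means the same thing at every
 * speed. If core_pct_busy == 48% at current_pstate 16 with max_pstate 32,
 * the scaled value is 48 * 32/16 == 96%, just under the default setpoint
 * of 97, so the controller leaves the pstate essentially alone.
 */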
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl = 0;
	int steps;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	steps = abs(ctl);

	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->samples[cpu->sample_ptr];

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
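
/*
 * Our annotation: the model numbers below are 0x2a/0x2d Sandy Bridge,
 * 0x3a/0x3e Ivy Bridge, 0x3c/0x3f/0x45/0x46 Haswell variants, and 0x37
 * Bay Trail (Silvermont), which takes byt_params for its extra VID
 * handling.
 */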
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;
	const struct x86_cpu_id *id;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	intel_pstate_get_cpu_pstates(cpu);
	if (!cpu->pstate.current_pstate) {
		all_cpu_data[cpunum] = NULL;
		kfree(cpu);
		return -ENODATA;
	}

	cpu->cpu = cpunum;

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->samples[cpu->sample_ptr];
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;

	del_timer(&all_cpu_data[cpu]->timer);
	kfree(all_cpu_data[cpu]);
	all_cpu_data[cpu] = NULL;
	return 0;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
	    limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set       = funcs->set;
	pstate_funcs.get_vid   = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}

#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
);
976 static int __init
intel_pstate_setup(char *str
)
981 if (!strcmp(str
, "disable"))
985 early_param("intel_pstate", intel_pstate_setup
);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");