/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define SAMPLE_COUNT		3

#define BYT_RATIOS	0x66a
#define BYT_VIDS	0x66b

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
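/*
 * Worked example for the fixed-point helpers above (values are
 * illustrative): with FRAC_BITS = 8 the driver uses a 24.8 format, so
 * int_tofp(3) == 768 and fp_toint(768) == 3.  mul_fp() widens to 64
 * bits and shifts back down once:
 *
 *	mul_fp(int_tofp(3), int_tofp(2)) == (768 * 512) >> 8 == int_tofp(6)
 *
 * while div_fp() pre-shifts the dividend so the quotient stays in
 * fixed point:
 *
 *	div_fp(int_tofp(3), int_tofp(2)) == (768 << 8) / 512 == 384
 *
 * i.e. 1.5 in 24.8 notation.
 */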
static u64 energy_divisor;
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	unsigned long long tsc;
	int freq;
};
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	turbo_pstate;
};

struct vid_data {
	int32_t min;
	int32_t max;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	char name[64];

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	int sample_ptr;
	u64 prev_aperf;
	u64 prev_mperf;
	unsigned long long prev_tsc;
	struct sample samples[SAMPLE_COUNT];
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = setpoint - busy;
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

	return (signed int)fp_toint(result);
}
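/*
 * Illustrative pid_calc() walk-through (numbers invented for the
 * example): take setpoint = 97, deadband = 0 and only a proportional
 * gain programmed from 20 percent, so p_gain ~= 0.20 in fixed point.
 * A sample with busy = int_tofp(90) gives fp_error = int_tofp(7),
 * pterm ~= int_tofp(1.4), and the function returns 1, so the caller
 * steps the P state by one.  Note the sign convention: busyness below
 * the setpoint produces a positive result.
 */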
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid,
		pid_params.setpoint,
		100,
		pid_params.deadband,
		0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	unsigned int *value = data;

	*value = (unsigned int)val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	unsigned int *value = data;

	*val = *value;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				debugfs_parent, pid_files[i].value,
				&fops_pid_param);
		i++;
	}
}
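/*
 * With debugfs mounted, the files created above live under
 * /sys/kernel/debug/pstate_snb/.  For example, from a root shell:
 *
 *	echo 95 > /sys/kernel/debug/pstate_snb/setpoint
 *
 * updates pid_params.setpoint and, through pid_param_set(), resets the
 * PID controller on every online CPU so the new tuning takes effect.
 */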
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object) \
	static ssize_t show_##file_name \
	(struct kobject *kobj, struct attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%u\n", limits.object); \
	}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0, 1);

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};
static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;
static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}
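/*
 * The attribute group registered above appears in sysfs at
 * /sys/devices/system/cpu/intel_pstate/.  Example usage from a root
 * shell:
 *
 *	echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *	echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 *
 * The first caps max_sysfs_pct (and therefore max_perf) at 80 percent;
 * the second sets limits.no_turbo, which makes the set() callbacks
 * write the turbo-disable bit into MSR_IA32_PERF_CTL.
 */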
/************************** sysfs end ************************/
static int byt_get_min_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return value & 0xFF;
}
static int byt_get_max_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0xFF;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));
}
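/*
 * Illustrative VID interpolation (register values invented for the
 * example): if byt_get_vid() read vid.min = 0x20 and vid.max = 0x40
 * for pstates 5..20, then vid.ratio ~= int_tofp(32.0 / 15) and a
 * request for pstate 10 in byt_set_pstate() resolves to roughly
 * 0x20 + 5 * 32 / 15 == 42 (0x2a) after fp_toint(), clamped to
 * [vid.min, vid.max] before being merged into the PERF_CTL value.
 */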
static int core_get_min_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}
static int core_get_max_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;
	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = ((value) & 255);
	if (ret <= nont)
		ret = nont;
	return ret;
}
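/*
 * Bits 7:0 of MSR_NHM_TURBO_RATIO_LIMIT hold the maximum 1-core turbo
 * ratio.  For example, a low byte of 0x26 (38) means a 3.8 GHz
 * single-core turbo ceiling; if the reported byte is not above the
 * non-turbo maximum from MSR_PLATFORM_INFO, the code above falls back
 * to that maximum.
 */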
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_max_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}
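/*
 * Example of the clamping above (hypothetical part with min/max/turbo
 * pstates of 8/32/38): a max_perf_pct of 50 gives limits.max_perf of
 * roughly 0.5, so *max = fp_toint(int_tofp(38) * 0.5) = 19; with
 * limits.min_perf = 0, *min clamps up to the hardware minimum of 8.
 * If no_turbo is set, the 50 percent cap is taken against 32 instead,
 * giving *max = 16.
 */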
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
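/*
 * Note on units in the trace call above: a pstate is a multiple of the
 * 100 MHz bus clock and trace_cpu_frequency() takes kHz, hence the
 * pstate * 100000 scaling.  A request for pstate 20, for example, is
 * traced as 2000000 kHz, i.e. 2.0 GHz.
 */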
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}
static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	/*
	 * Go to max pstate so we don't slow up boot if we are built-in;
	 * if we are a module we will take care of it during normal operation.
	 */
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					struct sample *sample)
{
	u64 core_pct;
	u64 c0_pct;

	core_pct = div64_u64(sample->aperf * 100, sample->mperf);

	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate),
			int_tofp(core_pct * 1000)));

	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
}
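/*
 * Worked example for the busyness math above (sample deltas invented):
 * aperf = 18M against mperf = 20M gives core_pct = 90, i.e. the core
 * ran at 90 percent of its guaranteed frequency while unhalted;
 * mperf = 20M against tsc = 100M gives c0_pct = 20, i.e. it was
 * unhalted for 20 percent of the interval.  With max_pstate = 32 that
 * yields freq = 32 * 90 * 1000 = 2880000 kHz (2.88 GHz) and
 * core_pct_busy ~= 90 * 21 / 100 ~= 18.9.  The "+ 1" keeps a fully
 * idle interval from collapsing the product to zero.
 */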
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long long tsc;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();

	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
	cpu->samples[cpu->sample_ptr].aperf = aperf;
	cpu->samples[cpu->sample_ptr].mperf = mperf;
	cpu->samples[cpu->sample_ptr].tsc = tsc;
	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate;

	core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
}
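/*
 * Scaling example (numbers invented): a core that is 80 percent busy
 * while running at pstate 16 with max_pstate 32 reports
 * mul_fp(int_tofp(80), int_tofp(2)) == int_tofp(160).  Normalizing
 * busyness to the max P state is what lets a single setpoint work at
 * every operating point: 160 is far above a setpoint such as 97, so
 * the PID will drive the P state up.
 */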
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl = 0;
	int steps;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	steps = abs(ctl);

	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;
	u64 energy;

	intel_pstate_sample(cpu);

	sample = &cpu->samples[cpu->sample_ptr];
	rdmsrl(MSR_PKG_ENERGY_STATUS, energy);

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			div64_u64(energy, energy_divisor),
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
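/*
 * The hex values above are Intel family-6 model numbers: 0x2a/0x2d are
 * Sandy Bridge parts, 0x3a/0x3e Ivy Bridge and 0x3c/0x3f/0x45/0x46
 * Haswell variants, all driven with core_params, while 0x37 is the
 * Bay Trail (Silvermont) part that needs the byt_params VID handling.
 */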
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;
	const struct x86_cpu_id *id;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	intel_pstate_get_cpu_pstates(cpu);
	if (!cpu->pstate.current_pstate) {
		all_cpu_data[cpunum] = NULL;
		kfree(cpu);
		return -ENODATA;
	}

	cpu->cpu = cpunum;

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data =
		(unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->samples[cpu->sample_ptr];
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}
	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
		(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;

	del_timer(&all_cpu_data[cpu]->timer);
	kfree(all_cpu_data[cpu]);
	all_cpu_data[cpu] = NULL;
	return 0;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
		limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the MSRs we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
		!pstate_funcs.get_min() ||
		!pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;
	u64 units;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	rdmsrl(MSR_RAPL_POWER_UNIT, units);
	energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
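/*
 * Example: booting with intel_pstate=disable on the kernel command
 * line sets no_load, so intel_pstate_init() bails out with -ENODEV and
 * the CPU stays with whichever other cpufreq driver is available.
 */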
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");