/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define SAMPLE_COUNT		3

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
        return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
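
/*
 * Illustrative example (not part of the original source): with
 * FRAC_BITS = 8, int_tofp(3) == 768 and int_tofp(2) == 512, so
 * mul_fp(768, 512) == (768 * 512) >> 8 == 1536 == int_tofp(6), while
 * div_fp(768, 512) == (768 << 8) / 512 == 384, i.e. 1.5 in fixed point.
 */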
struct cpudata {
        int cpu;

        char name[64];

        struct timer_list timer;

        struct pstate_adjust_policy *pstate_policy;
        struct pstate_data pstate;
        struct _pid pid;

        int min_pstate_count;

        u64 prev_aperf;
        u64 prev_mperf;
        int sample_ptr;
        struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
        int sample_rate_ms;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
};

static struct pstate_adjust_policy default_policy = {
        .sample_rate_ms = 10,
        .deadband = 0,
        .setpoint = 109,
        .p_gain_pct = 17,
        .d_gain_pct = 0,
        .i_gain_pct = 4,
};
static struct perf_limits limits = {
        .no_turbo = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral)
{
        pid->setpoint = setpoint;
        pid->deadband = deadband;
        pid->integral = int_tofp(integral);
        pid->last_err = setpoint - busy;
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int busy)
{
        signed int err, result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        err = pid->setpoint - busy;
        fp_error = int_tofp(err);

        if (abs(err) <= pid->deadband)
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /* limit the integral term */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, (err - pid->last_err));
        pid->last_err = err;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

        return (signed int)fp_toint(result);
}
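
/*
 * Worked example (illustrative numbers, assuming the default gains of
 * p_gain_pct = 17, i_gain_pct = 4, d_gain_pct = 0 and a zero deadband):
 * for setpoint = 109 and busy = 89, err = 20, so pterm is roughly
 * 0.17 * 20 = 3.4; with the integral at 20 after this sample, the i term
 * adds roughly 0.04 * 20 = 0.8.  result is about 4.2, which fp_toint()
 * truncates to 4.  A positive return value means the core is less busy
 * than the setpoint, and the caller maps it to a P-state decrease.
 */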
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
        pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
        pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);

        pid_reset(&cpu->pid,
                  cpu->pstate_policy->setpoint,
                  100,
                  cpu->pstate_policy->deadband,
                  0);
}
static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}
static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
                        pid_param_set, "%llu\n");
struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &default_policy.sample_rate_ms},
        {"d_gain_pct", &default_policy.d_gain_pct},
        {"i_gain_pct", &default_policy.i_gain_pct},
        {"deadband", &default_policy.deadband},
        {"setpoint", &default_policy.setpoint},
        {"p_gain_pct", &default_policy.p_gain_pct},
        {NULL, NULL}
};
static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
        int i = 0;

        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                    debugfs_parent, pid_files[i].value,
                                    &fops_pid_param);
                i++;
        }
}
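
/*
 * Example usage (illustrative): with debugfs mounted at /sys/kernel/debug,
 * the tunables above show up as /sys/kernel/debug/pstate_snb/<name>, e.g.
 * "echo 105 > /sys/kernel/debug/pstate_snb/setpoint" updates the default
 * policy and, via pid_param_set(), resets the PID state on every CPU.
 */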
/************************** debugfs end ************************/
/************************** sysfs begin ************************/
#define show_one(file_name, object)                                     \
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
                return sprintf(buf, "%u\n", limits.object);             \
        }
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0, 1);

        return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.min_perf_pct = clamp_t(int, input, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        return count;
}
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;
static void intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject,
                                &intel_pstate_attr_group);
        BUG_ON(rc);
}
/************************** sysfs end ************************/
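
/*
 * Example usage (illustrative): the attribute group registers under
 * /sys/devices/system/cpu/intel_pstate/, so
 * "echo 50 > /sys/devices/system/cpu/intel_pstate/max_perf_pct" caps the
 * driver at half of the available performance range, and writing 1 to
 * no_turbo keeps P-state selection at or below the non-turbo maximum.
 */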
static int intel_pstate_min_pstate(void)
{
        u64 value;
        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}
static int intel_pstate_max_pstate(void)
{
        u64 value;
        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}
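
/*
 * Note: in MSR_PLATFORM_INFO, bits 47:40 hold the maximum efficiency
 * (minimum) ratio and bits 15:8 the maximum non-turbo ratio, which is
 * what the two shift-and-mask reads above extract.
 */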
static int intel_pstate_turbo_pstate(void)
{
        u64 value;
        int nont, ret;
        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = intel_pstate_max_pstate();
        ret = ((value) & 255);
        if (ret <= nont)
                ret = nont;
        return ret;
}
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int min_perf;
        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;

        max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf,
                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
        *min = clamp_t(int, min_perf,
                       cpu->pstate.min_pstate, max_perf);
}
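
/*
 * Illustrative example: with min_pstate = 16, max_pstate = 28,
 * turbo_pstate = 35, no_turbo = 0, max_perf = int_tofp(1) / 2 and
 * min_perf = int_tofp(1) / 4, max_perf scales to fp_toint(35 * 0.5) = 17
 * (within [16, 35]), and min_perf scales to fp_toint(17 * 0.25) = 4,
 * which the clamp raises back up to min_pstate = 16.
 */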
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

        pstate = clamp_t(int, pstate, min_perf, max_perf);

        if (pstate == cpu->pstate.current_pstate)
                return;

#ifndef MODULE
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
#endif
        cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
}
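
/*
 * Note: IA32_PERF_CTL expects the target ratio in bits 15:8, hence the
 * "pstate << 8" in the MSR write above.
 */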
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
        int target;
        target = cpu->pstate.current_pstate + steps;

        intel_pstate_set_pstate(cpu, target);
}
static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
        int target;
        target = cpu->pstate.current_pstate - steps;
        intel_pstate_set_pstate(cpu, target);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        sprintf(cpu->name, "Intel 2nd generation core");

        cpu->pstate.min_pstate = intel_pstate_min_pstate();
        cpu->pstate.max_pstate = intel_pstate_max_pstate();
        cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();

        /*
         * Go to max pstate so we don't slow up boot if we are built-in;
         * if we are a module we will take care of it during normal
         * operation.
         */
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                          struct sample *sample)
{
        u64 core_pct;
        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
        sample->freq = cpu->pstate.max_pstate * core_pct * 1000;

        sample->core_pct_busy = core_pct;
}
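
/*
 * Illustrative example: if the APERF delta for the sample window is
 * 1,200,000 and the MPERF delta is 2,000,000, core_pct = 60; with
 * max_pstate = 28 the reported frequency is 28 * 60 * 1000 = 1,680,000
 * kHz, i.e. 1.68 GHz.
 */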
static inline void intel_pstate_sample(struct cpudata *cpu)
{
        u64 aperf, mperf;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
        cpu->samples[cpu->sample_ptr].aperf = aperf;
        cpu->samples[cpu->sample_ptr].mperf = mperf;
        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;

        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
        int sample_time, delay;

        sample_time = cpu->pstate_policy->sample_rate_ms;
        delay = msecs_to_jiffies(sample_time);
        mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
        int32_t busy_scaled;
        int32_t core_busy, max_pstate, current_pstate;

        core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

        return fp_toint(busy_scaled);
}
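
/*
 * Illustrative example: at core_pct_busy = 80 with max_pstate = 28 and
 * current_pstate = 14, busy_scaled = 80 * (28 / 14) = 160.  Scaling busy
 * to the max P-state gives the PID a setpoint reference that does not
 * shift as the current operating point moves.
 */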
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;

        pid = &cpu->pid;
        busy_scaled = intel_pstate_get_scaled_busy(cpu);

        ctl = pid_calc(pid, busy_scaled);

        steps = abs(ctl);
        if (ctl < 0)
                intel_pstate_pstate_increase(cpu, steps);
        else
                intel_pstate_pstate_decrease(cpu, steps);
}
static void intel_pstate_timer_func(unsigned long __data)
{
        struct cpudata *cpu = (struct cpudata *) __data;

        intel_pstate_sample(cpu);
        intel_pstate_adjust_busy_pstate(cpu);

        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                cpu->min_pstate_count++;
                if (!(cpu->min_pstate_count % 5)) {
                        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
                }
        } else
                cpu->min_pstate_count = 0;

        intel_pstate_set_sample_time(cpu);
}
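
/*
 * Note on the min-P-state check above: every fifth consecutive sample
 * pinned at the minimum P-state briefly requests the maximum P-state; the
 * apparent intent is to give a loaded-down core a periodic kick so the
 * next samples measure demand from the top instead of ramping up slowly.
 */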
#define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, default_policy),
        ICPU(0x2d, default_policy),
        ICPU(0x3a, default_policy),
        ICPU(0x3c, default_policy),
        ICPU(0x3e, default_policy),
        ICPU(0x3f, default_policy),
        ICPU(0x45, default_policy),
        ICPU(0x46, default_policy),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
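
/*
 * The family 6 model IDs above are believed to cover Sandy Bridge
 * (0x2a, 0x2d), Ivy Bridge (0x3a, 0x3e) and Haswell (0x3c, 0x3f, 0x45,
 * 0x46) parts, all sharing default_policy.
 */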
static int intel_pstate_init_cpu(unsigned int cpunum)
{
        const struct x86_cpu_id *id;
        struct cpudata *cpu;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        intel_pstate_get_cpu_pstates(cpu);

        cpu->cpu = cpunum;
        cpu->pstate_policy =
                (struct pstate_adjust_policy *)id->driver_data;
        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data =
                (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

        add_timer_on(&cpu->timer, cpunum);

        pr_info("Intel pstate controlling: cpu %d\n", cpunum);

        return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct sample *sample;
        struct cpudata *cpu;

        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
        sample = &cpu->samples[cpu->sample_ptr];
        return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
                return 0;
        }

        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy,
                                     policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        int cpu = policy->cpu;

        del_timer(&all_cpu_data[cpu]->timer);
        kfree(all_cpu_data[cpu]);
        all_cpu_data[cpu] = NULL;
        return 0;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        int rc, min_pstate, max_pstate;
        struct cpudata *cpu;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (!limits.no_turbo &&
            limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
        policy->min = min_pstate * 100000;
        policy->max = max_pstate * 100000;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
        policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}
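
/*
 * Note: a P-state number is a bus-ratio multiplier in units of 100 MHz on
 * these parts, so "pstate * 100000" converts it to the kHz values that
 * cpufreq expects.
 */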
static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .name           = "intel_pstate",
};
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
        /* Check that all the MSRs we are using are valid. */
        u64 aperf, mperf, tmp;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);

        if (!intel_pstate_min_pstate() ||
            !intel_pstate_max_pstate() ||
            !intel_pstate_turbo_pstate())
                return -ENODEV;

        rdmsrl(MSR_IA32_APERF, tmp);
        if (!(tmp - aperf))
                return -ENODEV;

        rdmsrl(MSR_IA32_MPERF, tmp);
        if (!(tmp - mperf))
                return -ENODEV;

        return 0;
}
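
/*
 * Reading APERF/MPERF twice and requiring the counts to advance between
 * reads guards against environments (e.g. some hypervisors) where the
 * MSRs exist but do not actually count.
 */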
static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;

        if (no_load)
                return -ENODEV;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

        pr_info("Intel P-state driver initializing.\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();
        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        del_timer_sync(&all_cpu_data[cpu]->timer);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        return 0;
}
early_param("intel_pstate", intel_pstate_setup);
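
/*
 * Example: booting with "intel_pstate=disable" on the kernel command line
 * sets no_load, so intel_pstate_init() returns -ENODEV before registering
 * the driver and leaves the CPU to another cpufreq driver.
 */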
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");