arch/x86/kernel/cpu/sched.c (linux-2.6/next.git, commit "Merge branch 'akpm'")
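
/*
 * x86 cpu_power scaling for the scheduler: when the CPU exposes the
 * APERF/MPERF MSRs, use their ratio as an estimate of how fast this CPU
 * has actually been running (turbo, throttling) relative to its nominal
 * frequency, instead of the generic defaults.
 */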
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/irqflags.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
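
/*
 * Sample APERF/MPERF with interrupts disabled and return the ratio of
 * the deltas since the previous sample stored for this CPU.
 */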
static unsigned long scale_aperfmperf(void)
{
        struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
        unsigned long ratio, flags;

        local_irq_save(flags);
        get_aperfmperf(&val);
        local_irq_restore(flags);

        ratio = calc_aperfmperf_ratio(old, &val);
        *old = val;

        return ratio;
}
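
/*
 * Scheduler hook: report this CPU's current performance as a factor of
 * SCHED_LOAD_SCALE, which the load balancer uses when computing cpu_power.
 */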
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
        /*
         * Do aperf/mperf on the CPU level because it includes things
         * like turbo mode, which are relevant to full cores.
         */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return scale_aperfmperf();

        /* Maybe have something cpufreq here. */
        return default_scale_freq_power(sd, cpu);
}
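
/*
 * Scheduler hook for SMT domains: the aperf/mperf ratio above already
 * accounts for the SMT gain, so report a neutral SCHED_LOAD_SCALE here
 * and fall back to the generic SMT scaling otherwise.
 */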
unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
        /* aperf/mperf already includes the SMT gain. */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return SCHED_LOAD_SCALE;

        return default_scale_smt_power(sd, cpu);
}

#endif
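
/*
 * The aperf/mperf helpers used above (struct aperfmperf, get_aperfmperf(),
 * calc_aperfmperf_ratio()) are defined outside this file, in
 * <asm/processor.h>. What follows is only a minimal, illustrative sketch
 * of how such helpers could be implemented, assuming <asm/msr.h> for
 * rdmsrl() and the MSR_IA32_APERF/MSR_IA32_MPERF definitions; it is not
 * the actual header code, and the real scaling and rounding details may
 * differ.
 */
struct aperfmperf {
        u64 aperf;
        u64 mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
        /* Snapshot both MSRs; callers disable interrupts around this. */
        rdmsrl(MSR_IA32_APERF, am->aperf);
        rdmsrl(MSR_IA32_MPERF, am->mperf);
}

static inline unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
                                                  struct aperfmperf *new)
{
        u64 aperf = new->aperf - old->aperf;
        u64 mperf = new->mperf - old->mperf;

        /*
         * delta(APERF)/delta(MPERF) says how fast the CPU actually ran
         * over the sampling interval (above 1 in turbo, below 1 when
         * throttled); express it relative to SCHED_LOAD_SCALE so the
         * scheduler can use it directly.
         */
        if (!mperf)
                return SCHED_LOAD_SCALE;

        return div64_u64(aperf * SCHED_LOAD_SCALE, mperf);
}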