/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

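/*
 * Each thermal step lowers the maximum allowed frequency by a further 20% of
 * cpuinfo.max_freq, so steps 0..3 correspond to a 0%, 20%, 40% and 60%
 * reduction (see the policy notifier below).
 */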
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per cpu data (which should really be
 * provided elsewhere)
 *
 * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
 * temporarily. Fortunately that's not a big issue here (I hope)
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;
	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

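/*
 * cpufreq policy notifier: on every CPUFREQ_ADJUST event, clamp the policy's
 * maximum frequency according to the package's current thermal reduction step.
 */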
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (
	    policy->cpuinfo.max_freq *
	    (100 - reduction_pctg(policy->cpu) * 20)
	) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	int i;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) ==
		    topology_physical_package_id(cpu))
			cpufreq_update_policy(i);
	}
	return 0;
}

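/*
 * Register/unregister the policy notifier above; cpu_has_cpufreq() only
 * reports cpufreq support once registration has succeeded.
 */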
void acpi_thermal_cpufreq_init(void)
{
	int i;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier
		    (&acpi_thermal_cpufreq_notifier_block,
		     CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There exists four states according to
	 * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

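/*
 * The combined cooling state space is ordered: states 0..max_pstate are
 * served purely by cpufreq frequency reduction, while anything above
 * max_pstate additionally engages ACPI T-state throttling for the remainder.
 */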
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};