usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm_throttle.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
static int cpupm_throttle_init(cpu_t *);
static void cpupm_throttle_fini(cpu_t *);
static void cpupm_throttle(cpuset_t, uint32_t);
static void cpupm_throttle_stop(cpu_t *);
cpupm_state_ops_t cpupm_throttle_ops = {
	"Generic ACPI T-state Support",
	cpupm_throttle_init,
	cpupm_throttle_fini,
	cpupm_throttle,
	cpupm_throttle_stop
};
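/*
 * This ops vector is presumably consumed by the common cpupm framework,
 * which dispatches T-state init/fini/transition/stop requests through
 * cpupm_state_ops_t rather than calling the static functions above directly.
 */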
/*
 * Error returns
 */
#define	THROTTLE_RET_SUCCESS		0x00
#define	THROTTLE_RET_INCOMPLETE_DATA	0x01
#define	THROTTLE_RET_UNSUP_STATE	0x02
#define	THROTTLE_RET_TRANS_INCOMPLETE	0x03

#define	THROTTLE_LATENCY_WAIT		1
/*
 * MSR register for clock modulation
 */
#define	IA32_CLOCK_MODULATION_MSR	0x19A
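/*
 * Layout of IA32_CLOCK_MODULATION (per the Intel SDM): bit 4 enables
 * on-demand clock modulation and bits 3:1 select the duty cycle in 1/8
 * increments.  Illustrative example only: a register value of 0x18 means
 * "enabled, duty-cycle field 4", i.e. roughly a 50% duty cycle, while 0x00
 * disables modulation entirely.
 */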
/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int cpupm_throttle_debug = 0;
#define	CTDEBUG(arglist) if (cpupm_throttle_debug) printf arglist;
#else
#define	CTDEBUG(arglist)
#endif
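/*
 * CTDEBUG usage note: the argument list is wrapped in an extra set of
 * parentheses so the whole printf-style argument list travels as a single
 * macro argument, e.g. CTDEBUG(("throttle level %d\n", lvl)); the macro
 * expands to nothing on non-DEBUG builds.
 */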
/*
 * Write the _PTC ctrl register.  How it is written depends upon the _PTC
 * ACPI object value.
 */
static int
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_ptc_t *ptc_ctrl;
	uint64_t reg;
	int ret = 0;

	ptc_ctrl = CPU_ACPI_PTC_CTRL(handle);

	switch (ptc_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/*
		 * Read the current clock-modulation setting because reserved
		 * bits must be preserved, compose the new value, and write it.
		 * The writable bits are 4:1 (1 to 4).
		 * Bits 3:1 => On-Demand Clock Modulation Duty Cycle
		 * Bit  4   => On-Demand Clock Modulation Enable
		 * Left shift ctrl by 1 to align with bits 1-4 of the MSR.
		 */
		reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
		reg &= ~((uint64_t)0x1E);
		reg |= ctrl;
		wrmsr(IA32_CLOCK_MODULATION_MSR, reg);
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		ret = cpu_acpi_write_port(ptc_ctrl->cr_address, ctrl,
		    ptc_ctrl->cr_width);
		break;

	default:
		DTRACE_PROBE1(throttle_ctrl_unsupported_type, uint8_t,
		    ptc_ctrl->cr_addrspace_id);
		ret = -1;
	}

	DTRACE_PROBE1(throttle_ctrl_write, uint32_t, ctrl);
	DTRACE_PROBE1(throttle_ctrl_write_err, int, ret);

	return (ret);
}
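/*
 * Illustrative only (not part of the build): the SDT probes fired above can
 * be observed from user land with something like
 *	# dtrace -n 'sdt:::throttle_ctrl_write { trace(arg0); }'
 * where arg0 carries the ctrl value being written.
 */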
static int
read_status(cpu_acpi_handle_t handle, uint32_t *stat)
{
	cpu_acpi_ptc_t *ptc_stat;
	uint64_t reg;
	int ret = 0;

	ptc_stat = CPU_ACPI_PTC_STATUS(handle);

	switch (ptc_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = rdmsr(IA32_CLOCK_MODULATION_MSR);
		*stat = reg & 0x1E;
		ret = 0;
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		ret = cpu_acpi_read_port(ptc_stat->cr_address, stat,
		    ptc_stat->cr_width);
		break;

	default:
		DTRACE_PROBE1(throttle_status_unsupported_type, uint8_t,
		    ptc_stat->cr_addrspace_id);
		return (-1);
	}

	DTRACE_PROBE1(throttle_status_read, uint32_t, *stat);
	DTRACE_PROBE1(throttle_status_read_err, int, ret);

	return (ret);
}
/*
 * Transition the current processor to the requested throttling state.
 */
static void
cpupm_tstate_transition(uint32_t req_state)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_tstate_t *req_tstate;
	uint32_t ctrl;
	uint32_t stat;
	int i;

	req_tstate = (cpu_acpi_tstate_t *)CPU_ACPI_TSTATES(handle);
	req_tstate += req_state;
	DTRACE_PROBE1(throttle_transition, uint32_t,
	    CPU_ACPI_FREQPER(req_tstate));

	/*
	 * Initiate the processor t-state change.
	 */
	ctrl = CPU_ACPI_TSTATE_CTRL(req_tstate);
	if (write_ctrl(handle, ctrl) != 0) {
		return;
	}

	/*
	 * If status is zero, then transition is synchronous and
	 * no status value comparison is required.
	 */
	if (CPU_ACPI_TSTATE_STAT(req_tstate) == 0) {
		return;
	}

	/* Wait until switch is complete, but bound the loop just in case. */
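	/*
	 * The bound comes from the _TSS transition latency (in microseconds):
	 * e.g. a latency of 10 us starts i at 20, so the loop polls at most
	 * ~20 times, sleeping THROTTLE_LATENCY_WAIT (1 us) between status
	 * reads.
	 */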
	for (i = CPU_ACPI_TSTATE_TRANSLAT(req_tstate) * 2; i >= 0;
	    i -= THROTTLE_LATENCY_WAIT) {
		if (read_status(handle, &stat) == 0 &&
		    CPU_ACPI_TSTATE_STAT(req_tstate) == stat)
			break;
		drv_usecwait(THROTTLE_LATENCY_WAIT);
	}

	if (CPU_ACPI_TSTATE_STAT(req_tstate) != stat) {
		DTRACE_PROBE(throttle_transition_incomplete);
	}
}
static void
cpupm_throttle(cpuset_t set, uint32_t throtl_lvl)
{
	/*
	 * If thread is already running on target CPU then just
	 * make the transition request.  Otherwise, we'll need to
	 * make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		cpupm_tstate_transition(throtl_lvl);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)throtl_lvl, NULL, NULL,
		    CPUSET2BV(set), (xc_func_t)cpupm_tstate_transition);
	}
	kpreempt_enable();
}
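/*
 * Hypothetical usage sketch (callers normally reach this routine through
 * the cpupm_state_ops_t vector rather than calling it directly):
 *
 *	cpuset_t set;
 *
 *	CPUSET_ZERO(set);
 *	CPUSET_ADD(set, 0);
 *	CPUSET_ADD(set, 1);
 *	cpupm_throttle(set, 2);		throttle CPUs 0 and 1 to level 2
 */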
static int
cpupm_throttle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_ptc_t *ptc_stat;
	int ret;

	if ((ret = cpu_acpi_cache_tstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU throttling is being "
			    "disabled due to errors parsing ACPI T-state "
			    "objects exported by BIOS.");
		cpupm_throttle_fini(cp);
		return (THROTTLE_RET_INCOMPLETE_DATA);
	}

	/*
	 * Check the address space used for transitions
	 */
	ptc_stat = CPU_ACPI_PTC_STATUS(handle);
	switch (ptc_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		CTDEBUG(("T-State transitions will use fixed hardware\n"));
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		CTDEBUG(("T-State transitions will use System IO\n"));
		break;
	default:
		cmn_err(CE_NOTE, "!_PTC configured for unsupported "
		    "address space type = %d.", ptc_stat->cr_addrspace_id);
		return (THROTTLE_RET_INCOMPLETE_DATA);
	}

	cpupm_alloc_domains(cp, CPUPM_T_STATES);

	return (THROTTLE_RET_SUCCESS);
}
static void
cpupm_throttle_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_tstate_domains);
	cpu_acpi_free_tstate_data(handle);
}
static void
cpupm_throttle_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_T_STATES, &cpupm_tstate_domains);
	cpu_acpi_free_tstate_data(handle);
}
/*
 * This routine reads the ACPI _TPC object.  It's accessed as a callback
 * by the cpu driver whenever a _TPC change notification is received.
 */
static int
cpupm_throttle_get_max(processorid_t cpu_id)
{
	cpu_t *cp = cpu[cpu_id];
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle;
	int throtl_level;
	int max_throttle_lvl;
	uint_t num_throtl;

	if (mach_state == NULL) {
		return (-1);
	}

	handle = mach_state->ms_acpi_handle;
	ASSERT(handle != NULL);

	cpu_acpi_cache_tpc(handle);
	throtl_level = CPU_ACPI_TPC(handle);

	num_throtl = CPU_ACPI_TSTATES_COUNT(handle);
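	/*
	 * Throttle levels index the cached _TSS table: entry 0 is normally
	 * the unthrottled (T0) state and num_throtl - 1 the deepest throttle.
	 * An out-of-range _TPC value is treated below as "no throttling".
	 */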
	max_throttle_lvl = num_throtl - 1;
	if ((throtl_level < 0) || (throtl_level > max_throttle_lvl)) {
		cmn_err(CE_NOTE, "!cpupm_throttle_get_max: CPU %d: "
		    "_TPC out of range %d", cp->cpu_id, throtl_level);
		throtl_level = 0;
	}

	return (throtl_level);
}
/*
 * Take care of CPU throttling when a _TPC notification arrives.
 */
void
cpupm_throttle_manage_notification(void *ctx)
{
	cpu_t *cp = ctx;
	processorid_t cpu_id = cp->cpu_id;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	boolean_t is_ready;
	int new_level;

	if (mach_state == NULL) {
		return;
	}

	/*
	 * We currently refuse to power-manage if the CPU is not ready to
	 * take cross calls (cross calls fail silently if the CPU is not
	 * ready for them).
	 *
	 * Additionally, for x86 platforms we cannot power-manage an instance
	 * until it has been initialized.
	 */
	is_ready = (cp->cpu_flags & CPU_READY) && cpupm_throttle_ready(cp);
	if (!is_ready)
		return;

	if (!(mach_state->ms_caps & CPUPM_T_STATES))
		return;
	ASSERT(mach_state->ms_tstate.cma_ops != NULL);

	/*
	 * Get the new T-State support level
	 */
	new_level = cpupm_throttle_get_max(cpu_id);

	cpupm_state_change(cp, new_level, CPUPM_T_STATES);
}