/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2013, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
 * intel_idle is a cpuidle driver that loads on specific Intel processors
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have the same idle states as the boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 */

/*
 * Known limitations
 *
 * The driver currently initializes for_each_online_cpu() upon modprobe.
 * It is unaware of subsequent processors hot-added to the system.
 * This means that if you boot with maxcpus=n and later online
 * processors above n, those processors will use C1 only.
 *
 * ACPI has a .suspend hack to turn off deep c-states during suspend
 * to avoid complications with the lapic timer workaround.
 * Have not seen issues with suspend, but may need same workaround here.
 *
 * There is currently no kernel-based automatic probing/loading mechanism
 * if the driver is built as a module.
 */
/* un-comment DEBUG to enable pr_debug() statements */
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/mwait.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;

static unsigned int mwait_substates;
#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
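/*
 * Illustrative example: a system whose LAPIC timer keeps ticking in both
 * C1 and C2 would use ((1 << 1) | (1 << 2)) == 0x6; intel_idle() tests
 * (1 << cstate) against this mask before arming the broadcast timer.
 */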
struct idle_cpu {
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	bool byt_auto_demotion_disable_flag;
	bool disable_promotion_to_c1e;
};
static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);

static struct cpuidle_state *cpuidle_state_table;
/*
 * Set this flag for states where the HW flushes the TLB for us
 * and so we don't need cross-calls to keep it consistent.
 * If this flag is set, SW flushes the TLB, so even if the
 * HW doesn't do the flushing, this flag is safe to use.
 */
#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
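/*
 * For example, the deeper states in the tables below combine the flag with
 * the MWAIT hint, as in MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED;
 * intel_idle() then calls leave_mm() before entering such a state.
 */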
/*
 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 * the C-state (top nibble) and sub-state (bottom nibble)
 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 *
 * We store the hint at the top of our "flags" for each state.
 */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
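/*
 * Worked example: MWAIT2flg(0x20) == 0x20000000, and flg2MWAIT() applied
 * to the resulting flags word recovers the hint 0x20, i.e. top nibble 2
 * ("MWAIT(C3)" in the scheme above) with sub-state 0.
 */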
/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
static struct cpuidle_state nehalem_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 6,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 80,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 800,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state snb_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 2,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 211,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 345,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 345,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state byt_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 30,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 275,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 560,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 1500,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state ivb_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 156,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 300,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 300,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state ivt_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 80,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 156,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 300,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state ivt_cstates_4s[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.name = "C1E-IVT-4S",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 250,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 300,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 400,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state ivt_cstates_8s[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.name = "C1E-IVT-8S",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 500,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 600,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 700,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state hsw_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 2,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 100,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 400,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 500,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 900,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 1800,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state bdw_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 2,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 100,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 400,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 500,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 900,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 1800,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state atom_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 20,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.target_residency = 80,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 400,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 560,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
static struct cpuidle_state avn_cstates[] = {
	{
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.target_residency = 2,
		.enter = &intel_idle },
	{
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.target_residency = 45,
		.enter = &intel_idle },
	{
		.enter = NULL }
};
/**
 * intel_idle
 * @dev: cpuidle_device
 * @drv: cpuidle driver
 * @index: index of cpuidle state
 *
 * Must be called under local_irq_disable().
 */
static int intel_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);
	unsigned int cstate;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
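
	/*
	 * Example: for hint 0x20, cstate = ((0x20 >> 4) & 0xf) + 1 = 3,
	 * so bit 3 of lapic_timer_reliable_states is consulted below.
	 */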
	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLB's associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	mwait_idle_with_hints(eax, ecx);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	return index;
}
static void __setup_broadcast_timer(void *arg)
{
	unsigned long reason = (unsigned long)arg;
	int cpu = smp_processor_id();

	reason = reason ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &cpu);
}
static int cpu_hotplug_notify(struct notifier_block *n,
			      unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:

		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
			smp_call_function_single(hotcpu, __setup_broadcast_timer,
						 (void *)true, 1);

		/*
		 * Some systems can hotplug a cpu at runtime after
		 * the kernel has booted, we have to initialize the
		 * driver in this case
		 */
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
		if (!dev->registered)
			intel_idle_cpu_init(hotcpu);

		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block cpu_hotplug_notifier = {
	.notifier_call = cpu_hotplug_notify,
};
static void auto_demotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
	msr_bits &= ~(icpu->auto_demotion_disable_flags);
	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
static void c1e_promotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
	msr_bits &= ~0x2;	/* clear the C1E enable bit */
	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
static const struct idle_cpu idle_cpu_nehalem = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_atom = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_lincroft = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_byt = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_ivb = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_hsw = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_bdw = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_avn = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
};
#define ICPU(model, cpu) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
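/*
 * For example, ICPU(0x2a, idle_cpu_snb) expands to an x86_cpu_id entry
 * matching Intel family 6, model 0x2a (Sandy Bridge) with the MWAIT
 * feature, stashing &idle_cpu_snb in driver_data; intel_idle_probe()
 * retrieves it via x86_match_cpu().
 */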
static const struct x86_cpu_id intel_idle_ids[] = {
	ICPU(0x1a, idle_cpu_nehalem),
	ICPU(0x1e, idle_cpu_nehalem),
	ICPU(0x1f, idle_cpu_nehalem),
	ICPU(0x25, idle_cpu_nehalem),
	ICPU(0x2c, idle_cpu_nehalem),
	ICPU(0x2e, idle_cpu_nehalem),
	ICPU(0x1c, idle_cpu_atom),
	ICPU(0x26, idle_cpu_lincroft),
	ICPU(0x2f, idle_cpu_nehalem),
	ICPU(0x2a, idle_cpu_snb),
	ICPU(0x2d, idle_cpu_snb),
	ICPU(0x36, idle_cpu_atom),
	ICPU(0x37, idle_cpu_byt),
	ICPU(0x3a, idle_cpu_ivb),
	ICPU(0x3e, idle_cpu_ivt),
	ICPU(0x3c, idle_cpu_hsw),
	ICPU(0x3f, idle_cpu_hsw),
	ICPU(0x45, idle_cpu_hsw),
	ICPU(0x46, idle_cpu_hsw),
	ICPU(0x4d, idle_cpu_avn),
	ICPU(0x3d, idle_cpu_bdw),
	ICPU(0x4f, idle_cpu_bdw),
	ICPU(0x56, idle_cpu_bdw),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
static int __init intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;
	const struct x86_cpu_id *id;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (!id) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 6)
			pr_debug(PREFIX "does not run on family %d model %d\n",
				boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	cpuidle_state_table = icpu->state_table;

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
	else
		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);

	return 0;
}
/*
 * intel_idle_cpuidle_devices_uninit()
 * unregister, free cpuidle_devices
 */
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
}
/*
 * intel_idle_state_table_update()
 *
 * Update the default state_table for this CPU-id
 *
 * Currently used to access tuned IVT multi-socket targets
 * Assumption: num_sockets == (max_package_num + 1)
 */
void intel_idle_state_table_update(void)
{
	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
		int cpu, package_num, num_sockets = 1;

		for_each_online_cpu(cpu) {
			package_num = topology_physical_package_id(cpu);
			if (package_num + 1 > num_sockets) {
				num_sockets = package_num + 1;

				if (num_sockets > 4) {
					cpuidle_state_table = ivt_cstates_8s;
					return;
				}
			}
		}

		if (num_sockets > 2)
			cpuidle_state_table = ivt_cstates_4s;
		/* else, 1 and 2 socket systems use default ivt_cstates */
	}
}
/*
 * intel_idle_cpuidle_driver_init()
 * allocate, initialize cpuidle_states
 */
static int __init intel_idle_cpuidle_driver_init(void)
{
	int cstate;
	struct cpuidle_driver *drv = &intel_idle_driver;

	intel_idle_state_table_update();

	drv->state_count = 1;

	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
		int num_substates, mwait_hint, mwait_cstate;

		if (cpuidle_state_table[cstate].enter == NULL)
			break;

		if (cstate + 1 > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n",
				max_cstate);
			break;
		}

		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
		mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);

		/* number of sub-states for this state in CPUID.MWAIT */
		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
					& MWAIT_SUBSTATE_MASK;
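
		/*
		 * Example (illustrative value): if CPUID.MWAIT reported
		 * mwait_substates == 0x1120, then mwait_cstate 2 reads the
		 * nibble at bits 12-15: (0x1120 >> 12) & 0xf == 1 sub-state.
		 */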
		/* if NO sub-states for this state in CPUID, skip it */
		if (num_substates == 0)
			continue;

		if (((mwait_cstate + 1) > 2) &&
			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[cstate];

		drv->state_count += 1;
	}

	if (icpu->auto_demotion_disable_flags)
		on_each_cpu(auto_demotion_disable, NULL, 1);

	if (icpu->byt_auto_demotion_disable_flag) {
		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
	}

	if (icpu->disable_promotion_to_c1e)	/* each-cpu is redundant */
		on_each_cpu(c1e_promotion_disable, NULL, 1);

	return 0;
}
/*
 * intel_idle_cpu_init()
 * allocate, initialize, register cpuidle_devices
 * @cpu: cpu/core to initialize
 */
static int intel_idle_cpu_init(int cpu)
{
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
		intel_idle_cpuidle_devices_uninit();
		return -EIO;
	}

	if (icpu->auto_demotion_disable_flags)
		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);

	if (icpu->disable_promotion_to_c1e)
		smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);

	return 0;
}
static int __init intel_idle_init(void)
{
	int retval, i;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	intel_idle_cpuidle_driver_init();
	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s\n",
			drv ? drv->name : "none");
		return retval;
	}

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL)
		return -ENOMEM;

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		retval = intel_idle_cpu_init(i);
		if (retval) {
			cpu_notifier_register_done();
			cpuidle_unregister_driver(&intel_idle_driver);
			return retval;
		}
	}
	__register_cpu_notifier(&cpu_hotplug_notifier);

	cpu_notifier_register_done();

	return 0;
}
static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	cpu_notifier_register_begin();

	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
	__unregister_cpu_notifier(&cpu_hotplug_notifier);

	cpu_notifier_register_done();
}
module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);
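/*
 * Note: mode 0444 exposes max_cstate read-only under
 * /sys/module/intel_idle/parameters/; to limit or disable states, set it
 * on the kernel command line, e.g. "intel_idle.max_cstate=0".
 */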
MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");