/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * TBD:
 *  1. Make # power states dynamic.
 *  2. Support duty_cycle values that span bit 4.
 *  3. Optimize by having the scheduler determine busyness instead of
 *     having us try to calculate it here.
 *  4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/system.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
#define ACPI_PROCESSOR_FILE_INFO        "info"
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define ACPI_PROCESSOR_FILE_THROTTLING  "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT       "limit"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER     0x81

#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4   /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4   /* 1us (3.579 ticks per us) */
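
/*
 * Worked example (illustrative, not in the original source): the ACPI PM
 * timer runs at PM_TIMER_FREQUENCY (3579545 Hz, roughly 3.579 ticks per us),
 * so US_TO_PM_TIMER_TICKS(100) evaluates to (100 * 3579) / 1000 ~= 357 ticks,
 * and the 4-tick C2/C3 overhead above corresponds to roughly 1 us.
 */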
#define ACPI_PROCESSOR_LIMIT_USER       0
#define ACPI_PROCESSOR_LIMIT_THERMAL    1

#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME                ("acpi_processor")

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file);
static int acpi_processor_get_limit_info(struct acpi_processor *pr);
static struct acpi_driver acpi_processor_driver = {
    .name =     ACPI_PROCESSOR_DRIVER_NAME,
    .class =    ACPI_PROCESSOR_CLASS,
    .ids =      ACPI_PROCESSOR_HID,
    .ops =      {
            .add =      acpi_processor_add,
            .remove =   acpi_processor_remove,
            },
};

struct acpi_processor_errata {
    u8          smp;
    struct {
        u8          throttle:1;
        u8          fdma:1;
        u8          reserved:6;
        u32         bmisx;
    }           piix4;
};
static struct file_operations acpi_processor_info_fops = {
    .open =     acpi_processor_info_open_fs,
    .read =     seq_read,
    .llseek =   seq_lseek,
    .release =  single_release,
};

static struct file_operations acpi_processor_power_fops = {
    .open =     acpi_processor_power_open_fs,
    .read =     seq_read,
    .llseek =   seq_lseek,
    .release =  single_release,
};

static struct file_operations acpi_processor_throttling_fops = {
    .open =     acpi_processor_throttling_open_fs,
    .read =     seq_read,
    .llseek =   seq_lseek,
    .release =  single_release,
};

static struct file_operations acpi_processor_limit_fops = {
    .open =     acpi_processor_limit_open_fs,
    .read =     seq_read,
    .llseek =   seq_lseek,
    .release =  single_release,
};

static struct acpi_processor   *processors[NR_CPUS];
static struct acpi_processor_errata errata;
static void (*pm_idle_save)(void);
/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */
static int
acpi_processor_errata_piix4 (
    struct pci_dev      *dev)
{
    u8          rev = 0;
    u8          value1 = 0;
    u8          value2 = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4");

    if (!dev)
        return_VALUE(-EINVAL);

    /*
     * Note that 'dev' references the PIIX4 ACPI Controller.
     */

    pci_read_config_byte(dev, PCI_REVISION_ID, &rev);

    switch (rev) {
    case 0:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
        break;
    case 1:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
        break;
    case 2:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
        break;
    case 3:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
        break;
    default:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
        break;
    }

    switch (rev) {

    case 0:     /* PIIX4 A-step */
    case 1:     /* PIIX4 B-step */
        /*
         * See specification changes #13 ("Manual Throttle Duty Cycle")
         * and #14 ("Enabling and Disabling Manual Throttle"), plus
         * erratum #5 ("STPCLK# Deassertion Time") from the January
         * 2002 PIIX4 specification update.  Applies only to older
         * PIIX4 models.
         */
        errata.piix4.throttle = 1;

    case 2:     /* PIIX4E */
    case 3:     /* PIIX4M */
        /*
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */

        /*
         * Find the PIIX4 IDE Controller and get the Bus Master IDE
         * Status register address.  We'll use this later to read
         * each IDE controller's DMA status to make sure we catch all
         * DMA activity.
         */
        dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
                PCI_DEVICE_ID_INTEL_82371AB,
                PCI_ANY_ID, PCI_ANY_ID, NULL);
        if (dev)
            errata.piix4.bmisx = pci_resource_start(dev, 4);

        /*
         * Find the PIIX4 ISA Controller and read the Motherboard
         * DMA controller's status to see if Type-F (Fast) DMA mode
         * is enabled (bit 7) on either channel.  Note that we'll
         * disable C3 support if this is enabled, as some legacy
         * devices won't operate well if fast DMA is disabled.
         */
        dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
                PCI_DEVICE_ID_INTEL_82371AB_0,
                PCI_ANY_ID, PCI_ANY_ID, NULL);
        if (dev) {
            pci_read_config_byte(dev, 0x76, &value1);
            pci_read_config_byte(dev, 0x77, &value2);
            if ((value1 & 0x80) || (value2 & 0x80))
                errata.piix4.fdma = 1;
        }

        break;
    }

    if (errata.piix4.bmisx)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Bus master activity detection (BM-IDE) erratum enabled\n"));
    if (errata.piix4.fdma)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Type-F DMA livelock erratum (C3 disabled)\n"));

    return_VALUE(0);
}
static int
acpi_processor_errata (
    struct acpi_processor   *pr)
{
    int         result = 0;
    struct pci_dev      *dev = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_errata");

    if (!pr)
        return_VALUE(-EINVAL);

    /*
     * PIIX4
     */
    dev = pci_find_subsys(PCI_VENDOR_ID_INTEL,
        PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, PCI_ANY_ID, NULL);
    if (dev)
        result = acpi_processor_errata_piix4(dev);

    return_VALUE(result);
}
/* --------------------------------------------------------------------------
                                Power Management
   -------------------------------------------------------------------------- */

static inline u32
ticks_elapsed (
    u32         t1,
    u32         t2)
{
    if (t2 >= t1)
        return (t2 - t1);
    else if (!acpi_fadt.tmr_val_ext)
        return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
    else
        return ((0xFFFFFFFF - t1) + t2);
}
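
/*
 * Example (illustrative, not in the original source): with a 24-bit PM timer
 * (tmr_val_ext == 0), a wrap from t1 = 0x00FFFF00 to t2 = 0x00000100 yields
 * ((0x00FFFFFF - 0x00FFFF00) + 0x00000100) & 0x00FFFFFF = 0x1FF ticks,
 * i.e. the elapsed count is still correct across the counter rollover.
 */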
static void
acpi_processor_power_activate (
    struct acpi_processor   *pr,
    int         state)
{
    if (!pr)
        return;

    pr->power.states[pr->power.state].promotion.count = 0;
    pr->power.states[pr->power.state].demotion.count = 0;

    /* Cleanup from old state. */
    switch (pr->power.state) {
    case ACPI_STATE_C3:
        /* Disable bus master reload */
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
        break;
    }

    /* Prepare to use new state. */
    switch (state) {
    case ACPI_STATE_C3:
        /* Enable bus master reload */
        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
        break;
    }

    pr->power.state = state;

    return;
}
static void
acpi_processor_idle (void)
{
    struct acpi_processor   *pr = NULL;
    struct acpi_processor_cx *cx = NULL;
    int         next_state = 0;
    int         sleep_ticks = 0;
    u32         t1, t2 = 0;

    pr = processors[smp_processor_id()];
    if (!pr)
        return;

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    cx = &(pr->power.states[pr->power.state]);

    /*
     * Check for bus mastering activity (if required), record, and check
     * for demotion.
     */
    if (pr->flags.bm_check) {
        u32     bm_status = 0;

        pr->power.bm_activity <<= 1;

        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
            &bm_status, ACPI_MTX_DO_NOT_LOCK);
        if (bm_status) {
            pr->power.bm_activity++;
            acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                1, ACPI_MTX_DO_NOT_LOCK);
        }
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity; forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
            if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                pr->power.bm_activity++;
        }

        /*
         * Apply bus mastering demotion policy.  Automatically demote
         * to avoid a faulty transition.  Note that the processor
         * won't enter a low-power state during this call (to this
         * function) but should upon the next.
         *
         * TBD: A better policy might be to fall back to the demotion
         *      state (use it for this quantum only) instead of
         *      demoting -- and rely on duration as our sole demotion
         *      qualification.  This may, however, introduce DMA
         *      issues (e.g. floppy DMA transfer overrun/underrun).
         */
        if (pr->power.bm_activity & cx->demotion.threshold.bm) {
            local_irq_enable();
            next_state = cx->demotion.state;
            goto end;
        }
    }

    cx->usage++;

    /*
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch (pr->power.state) {

    case ACPI_STATE_C1:
        /* Invoke C1. */
        safe_halt();
        /*
         * TBD: Can't get time duration while in C1, as resumes
         *      go to an ISR rather than here.  Need to instrument
         *      base interrupt handler.
         */
        sleep_ticks = 0xFFFFFFFF;
        break;

    case ACPI_STATE_C2:
        /* Get start time (ticks) */
        t1 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Invoke C2 */
        inb(pr->power.states[ACPI_STATE_C2].address);
        /* Dummy op - must do something useless after P_LVL2 read */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Get end time (ticks) */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Re-enable interrupts */
        local_irq_enable();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
        break;

    case ACPI_STATE_C3:
        /* Disable bus master arbitration */
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
        /* Get start time (ticks) */
        t1 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Invoke C3 */
        inb(pr->power.states[ACPI_STATE_C3].address);
        /* Dummy op - must do something useless after P_LVL3 read */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Get end time (ticks) */
        t2 = inl(acpi_fadt.xpm_tmr_blk.address);
        /* Enable bus master arbitration */
        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        /* Re-enable interrupts */
        local_irq_enable();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
        break;

    default:
        local_irq_enable();
        return;
    }

    next_state = pr->power.state;

    /*
     * Track the number of longs (time asleep is greater than threshold)
     * and promote when the count threshold is reached.  Note that bus
     * mastering activity may prevent promotions.
     */
    if (cx->promotion.state) {
        if (sleep_ticks > cx->promotion.threshold.ticks) {
            cx->promotion.count++;
            cx->demotion.count = 0;
            if (cx->promotion.count >= cx->promotion.threshold.count) {
                if (pr->flags.bm_check) {
                    if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
                        next_state = cx->promotion.state;
                        goto end;
                    }
                }
                else {
                    next_state = cx->promotion.state;
                    goto end;
                }
            }
        }
    }

    /*
     * Track the number of shorts (time asleep is less than time threshold)
     * and demote when the usage threshold is reached.
     */
    if (cx->demotion.state) {
        if (sleep_ticks < cx->demotion.threshold.ticks) {
            cx->demotion.count++;
            cx->promotion.count = 0;
            if (cx->demotion.count >= cx->demotion.threshold.count) {
                next_state = cx->demotion.state;
                goto end;
            }
        }
    }

end:
    /*
     * If we're going to start using a new Cx state we must clean up
     * from the previous and prepare to use the new.
     */
    if (next_state != pr->power.state)
        acpi_processor_power_activate(pr, next_state);

    return;
}
static int
acpi_processor_set_power_policy (
    struct acpi_processor   *pr)
{
    ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

    /*
     * This function sets the default Cx state policy (OS idle handler).
     * Our scheme is to promote quickly to C2 but more conservatively
     * to C3.  We're favoring C2 for its characteristics of low latency
     * (quick response), good power savings, and ability to allow bus
     * mastering activity.  Note that the Cx state policy is completely
     * customizable and can be altered dynamically.
     */

    if (!pr)
        return_VALUE(-EINVAL);

    pr->power.state = ACPI_STATE_C1;
    pr->power.default_state = ACPI_STATE_C1;

    /*
     * Set the default C1 promotion and C2 demotion policies, where we
     * promote from C1 to C2 after several (10) successive C1 transitions,
     * as we cannot (currently) measure the time spent in C1.  Demote from
     * C2 to C1 anytime we experience a 'short' (time spent in C2 is less
     * than the C2 transition latency).  Note the simplifying assumption
     * that the 'cost' of a transition is amortized when we sleep for at
     * least as long as the transition's latency (thus the total transition
     * time is two times the latency).
     *
     * TBD: Measure C1 sleep times by instrumenting the core IRQ handler.
     * TBD: Demote to default C-State after long periods of activity.
     * TBD: Investigate policy's use of CPU utilization -vs- sleep duration.
     */
    if (pr->power.states[ACPI_STATE_C2].valid) {
        pr->power.states[ACPI_STATE_C1].promotion.threshold.count = 10;
        pr->power.states[ACPI_STATE_C1].promotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C2].latency_ticks;
        pr->power.states[ACPI_STATE_C1].promotion.state = ACPI_STATE_C2;

        pr->power.states[ACPI_STATE_C2].demotion.threshold.count = 1;
        pr->power.states[ACPI_STATE_C2].demotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C2].latency_ticks;
        pr->power.states[ACPI_STATE_C2].demotion.state = ACPI_STATE_C1;
    }

    /*
     * Set default C2 promotion and C3 demotion policies, where we promote
     * from C2 to C3 after several (4) cycles of no bus mastering activity
     * while maintaining sleep time criteria.  Demote immediately on a
     * short or whenever bus mastering activity occurs.
     */
    if ((pr->power.states[ACPI_STATE_C2].valid) &&
        (pr->power.states[ACPI_STATE_C3].valid)) {
        pr->power.states[ACPI_STATE_C2].promotion.threshold.count = 4;
        pr->power.states[ACPI_STATE_C2].promotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C3].latency_ticks;
        pr->power.states[ACPI_STATE_C2].promotion.threshold.bm = 0x0F;
        pr->power.states[ACPI_STATE_C2].promotion.state = ACPI_STATE_C3;

        pr->power.states[ACPI_STATE_C3].demotion.threshold.count = 1;
        pr->power.states[ACPI_STATE_C3].demotion.threshold.ticks =
            pr->power.states[ACPI_STATE_C3].latency_ticks;
        pr->power.states[ACPI_STATE_C3].demotion.threshold.bm = 0x0F;
        pr->power.states[ACPI_STATE_C3].demotion.state = ACPI_STATE_C2;
    }

    return_VALUE(0);
}
static int
acpi_processor_get_power_info (
    struct acpi_processor   *pr)
{
    int         result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

    if (!pr)
        return_VALUE(-EINVAL);

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "lvl2[0x%08x] lvl3[0x%08x]\n",
        pr->power.states[ACPI_STATE_C2].address,
        pr->power.states[ACPI_STATE_C3].address));

    /* TBD: Support ACPI 2.0 objects */

    /*
     * C0: This state exists only as filler in our array.
     */
    pr->power.states[ACPI_STATE_C0].valid = 1;

    /*
     * C1: ACPI requires C1 support for all processors.
     *
     * TBD: What about PROC_C1?
     */
    pr->power.states[ACPI_STATE_C1].valid = 1;

    /*
     * C2: We're (currently) only supporting C2 on UP systems.
     *
     * TBD: Support for C2 on MP (P_LVL2_UP).
     */
    if (pr->power.states[ACPI_STATE_C2].address) {

        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;

        /*
         * C2 latency must be less than or equal to 100 microseconds.
         */
        if (acpi_fadt.plvl2_lat > ACPI_PROCESSOR_MAX_C2_LATENCY)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C2 latency too large [%d]\n",
                acpi_fadt.plvl2_lat));
        /*
         * Only support C2 on UP systems (see TBD above).
         */
        else if (errata.smp)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C2 not supported in SMP mode\n"));
        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy.
         */
        else {
            pr->power.states[ACPI_STATE_C2].valid = 1;
            pr->power.states[ACPI_STATE_C2].latency_ticks =
                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl2_lat);
        }
    }

    /*
     * C3
     * TBD: Investigate use of WBINVD on UP/SMP system in absence of
     *      bm_control.
     */
    if (pr->power.states[ACPI_STATE_C3].address) {

        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        /*
         * C3 latency must be less than or equal to 1000 microseconds.
         */
        if (acpi_fadt.plvl3_lat > ACPI_PROCESSOR_MAX_C3_LATENCY)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 latency too large [%d]\n",
                acpi_fadt.plvl3_lat));
        /*
         * Only support C3 when bus mastering arbitration control
         * is present (able to disable bus mastering to maintain
         * cache coherency while in C3).
         */
        else if (!pr->flags.bm_control)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 support requires bus mastering control\n"));
        /*
         * Only support C3 on UP systems, as bm_control is only viable
         * on a UP system and flushing caches (e.g. WBINVD) is simply
         * too costly (at this time).
         */
        else if (errata.smp)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 not supported in SMP mode\n"));
        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "C3 not supported on PIIX4 with Type-F DMA\n"));
        }
        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy.
         */
        else {
            pr->power.states[ACPI_STATE_C3].valid = 1;
            pr->power.states[ACPI_STATE_C3].latency_ticks =
                US_TO_PM_TIMER_TICKS(acpi_fadt.plvl3_lat);
            pr->flags.bm_check = 1;
        }
    }

    /*
     * Now that we know which states are supported, set the default
     * policy.  Note that this policy can be changed dynamically
     * (e.g. encourage deeper sleeps to conserve battery life when
     * not on AC).
     */
    result = acpi_processor_set_power_policy(pr);
    if (result)
        return_VALUE(result);

    /*
     * If this processor supports C2 or C3 we denote it as being 'power
     * manageable'.  Note that there's really no policy involved for
     * when only C1 is supported.
     */
    if (pr->power.states[ACPI_STATE_C2].valid
        || pr->power.states[ACPI_STATE_C3].valid)
        pr->flags.power = 1;

    return_VALUE(0);
}
/* --------------------------------------------------------------------------
                              Performance Management
   -------------------------------------------------------------------------- */
#ifdef CONFIG_CPU_FREQ

static DECLARE_MUTEX(performance_sem);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */
static int acpi_processor_ppc_is_init = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
    unsigned long event,
    void *data)
{
    struct cpufreq_policy *policy = data;
    struct acpi_processor *pr;
    unsigned int ppc = 0;

    down(&performance_sem);

    if (event != CPUFREQ_INCOMPATIBLE)
        goto out;

    pr = processors[policy->cpu];
    if (!pr || !pr->performance)
        goto out;

    ppc = (unsigned int) pr->performance_platform_limit;

    if (ppc > pr->performance->state_count)
        goto out;

    cpufreq_verify_within_limits(policy, 0,
        pr->performance->states[ppc].core_frequency * 1000);
out:
    up(&performance_sem);

    return 0;
}


static struct notifier_block acpi_ppc_notifier_block = {
    .notifier_call = acpi_processor_ppc_notifier,
};
static int
acpi_processor_get_platform_limit (
    struct acpi_processor   *pr)
{
    acpi_status     status = 0;
    unsigned long   ppc = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

    if (!pr)
        return_VALUE(-EINVAL);

    /*
     * _PPC indicates the maximum state currently supported by the platform
     * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
     */
    status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n"));
        return_VALUE(-ENODEV);
    }

    pr->performance_platform_limit = (int) ppc;

    return_VALUE(0);
}
static int acpi_processor_ppc_has_changed(
    struct acpi_processor *pr)
{
    int ret = acpi_processor_get_platform_limit(pr);
    if (ret < 0)
        return (ret);
    else
        return cpufreq_update_policy(pr->id);
}
static void acpi_processor_ppc_init(void) {
    if (!cpufreq_register_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
        acpi_processor_ppc_is_init = 1;
    else
        printk(KERN_DEBUG "Warning: Processor Platform Limit not supported.\n");
}

static void acpi_processor_ppc_exit(void) {
    if (acpi_processor_ppc_is_init)
        cpufreq_unregister_notifier(&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER);

    acpi_processor_ppc_is_init = 0;
}
/*
 * when registering a cpufreq driver with this ACPI processor driver, the
 * _PCT and _PSS structures are read out and written into struct
 * acpi_processor_performance.
 */
static int acpi_processor_set_pdc (struct acpi_processor *pr)
{
    acpi_status         status = AE_OK;
    u32                 arg0_buf[3];
    union acpi_object   arg0 = {ACPI_TYPE_BUFFER};
    struct acpi_object_list no_object = {1, &arg0};
    struct acpi_object_list *pdc;

    ACPI_FUNCTION_TRACE("acpi_processor_set_pdc");

    arg0.buffer.length = 12;
    arg0.buffer.pointer = (u8 *) arg0_buf;
    arg0_buf[0] = ACPI_PDC_REVISION_ID;
    arg0_buf[1] = 0;
    arg0_buf[2] = 0;

    pdc = (pr->performance->pdc) ? pr->performance->pdc : &no_object;

    status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL);

    if ((ACPI_FAILURE(status)) && (pr->performance->pdc))
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Error evaluating _PDC, using legacy perf. control...\n"));

    return_VALUE(status);
}
static int
acpi_processor_get_performance_control (
    struct acpi_processor   *pr)
{
    int                 result = 0;
    acpi_status         status = 0;
    struct acpi_buffer  buffer = {ACPI_ALLOCATE_BUFFER, NULL};
    union acpi_object   *pct = NULL;
    union acpi_object   obj = {0};

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

    status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n"));
        return_VALUE(-ENODEV);
    }

    pct = (union acpi_object *) buffer.pointer;
    if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
        || (pct->package.count != 2)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n"));
        result = -EFAULT;
        goto end;
    }

    /*
     * control_register
     */
    obj = pct->package.elements[0];

    if ((obj.type != ACPI_TYPE_BUFFER)
        || (obj.buffer.length < sizeof(struct acpi_pct_register))
        || (obj.buffer.pointer == NULL)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Invalid _PCT data (control_register)\n"));
        result = -EFAULT;
        goto end;
    }
    memcpy(&pr->performance->control_register, obj.buffer.pointer,
        sizeof(struct acpi_pct_register));

    /*
     * status_register
     */
    obj = pct->package.elements[1];

    if ((obj.type != ACPI_TYPE_BUFFER)
        || (obj.buffer.length < sizeof(struct acpi_pct_register))
        || (obj.buffer.pointer == NULL)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Invalid _PCT data (status_register)\n"));
        result = -EFAULT;
        goto end;
    }
    memcpy(&pr->performance->status_register, obj.buffer.pointer,
        sizeof(struct acpi_pct_register));

end:
    acpi_os_free(buffer.pointer);

    return_VALUE(result);
}
static int
acpi_processor_get_performance_states (
    struct acpi_processor   *pr)
{
    int                 result = 0;
    acpi_status         status = AE_OK;
    struct acpi_buffer  buffer = {ACPI_ALLOCATE_BUFFER, NULL};
    struct acpi_buffer  format = {sizeof("NNNNNN"), "NNNNNN"};
    struct acpi_buffer  state = {0, NULL};
    union acpi_object   *pss = NULL;
    int                 i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

    status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n"));
        return_VALUE(-ENODEV);
    }

    pss = (union acpi_object *) buffer.pointer;
    if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
        result = -EFAULT;
        goto end;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
        pss->package.count));

    pr->performance->state_count = pss->package.count;
    pr->performance->states = kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, GFP_KERNEL);
    if (!pr->performance->states) {
        result = -ENOMEM;
        goto end;
    }

    for (i = 0; i < pr->performance->state_count; i++) {

        struct acpi_processor_px *px = &(pr->performance->states[i]);

        state.length = sizeof(struct acpi_processor_px);
        state.pointer = px;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

        status = acpi_extract_package(&(pss->package.elements[i]),
            &format, &state);
        if (ACPI_FAILURE(status)) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
            result = -EFAULT;
            kfree(pr->performance->states);
            goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
            i,
            (u32) px->core_frequency,
            (u32) px->power,
            (u32) px->transition_latency,
            (u32) px->bus_master_latency,
            (u32) px->control,
            (u32) px->status));

        if (!px->core_frequency) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data: freq is zero\n"));
            result = -EFAULT;
            kfree(pr->performance->states);
            goto end;
        }
    }

end:
    acpi_os_free(buffer.pointer);

    return_VALUE(result);
}
static int
acpi_processor_get_performance_info (
    struct acpi_processor   *pr)
{
    int             result = 0;
    acpi_status     status = AE_OK;
    acpi_handle     handle = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

    if (!pr || !pr->performance || !pr->handle)
        return_VALUE(-EINVAL);

    acpi_processor_set_pdc(pr);

    status = acpi_get_handle(pr->handle, "_PCT", &handle);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "ACPI-based processor performance control unavailable\n"));
        return_VALUE(-ENODEV);
    }

    result = acpi_processor_get_performance_control(pr);
    if (result)
        return_VALUE(result);

    result = acpi_processor_get_performance_states(pr);
    if (result)
        return_VALUE(result);

    result = acpi_processor_get_platform_limit(pr);
    if (result)
        return_VALUE(result);

    return_VALUE(0);
}
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
    .open =     acpi_processor_perf_open_fs,
    .read =     seq_read,
    .llseek =   seq_lseek,
    .release =  single_release,
};
static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor   *pr = (struct acpi_processor *)seq->private;
    int         i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

    if (!pr)
        goto end;

    if (!pr->performance) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    seq_printf(seq, "state count:             %d\n"
            "active state:            P%d\n",
            pr->performance->state_count,
            pr->performance->state);

    seq_puts(seq, "states:\n");
    for (i = 0; i < pr->performance->state_count; i++)
        seq_printf(seq, "   %cP%d:                  %d MHz, %d mW, %d uS\n",
            (i == pr->performance->state?'*':' '), i,
            (u32) pr->performance->states[i].core_frequency,
            (u32) pr->performance->states[i].power,
            (u32) pr->performance->states[i].transition_latency);

end:
    return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_perf_seq_show,
                        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_performance (
    struct file         *file,
    const char __user   *buffer,
    size_t              count,
    loff_t              *data)
{
    int                 result = 0;
    struct seq_file     *m = (struct seq_file *) file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *) m->private;
    struct acpi_processor_performance *perf;
    char                state_string[12] = {'\0'};
    unsigned int        new_state = 0;
    struct cpufreq_policy policy;

    ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

    if (!pr || (count > sizeof(state_string) - 1))
        return_VALUE(-EINVAL);

    perf = pr->performance;
    if (!perf)
        return_VALUE(-EINVAL);

    if (copy_from_user(state_string, buffer, count))
        return_VALUE(-EFAULT);

    state_string[count] = '\0';
    new_state = simple_strtoul(state_string, NULL, 0);

    if (new_state >= perf->state_count)
        return_VALUE(-EINVAL);

    cpufreq_get_policy(&policy, pr->id);

    policy.cpu = pr->id;
    policy.min = perf->states[new_state].core_frequency * 1000;
    policy.max = perf->states[new_state].core_frequency * 1000;

    result = cpufreq_set_policy(&policy);
    if (result)
        return_VALUE(result);

    return_VALUE(count);
}
static void
acpi_cpufreq_add_file (
    struct acpi_processor *pr)
{
    struct proc_dir_entry   *entry = NULL;
    struct acpi_device      *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile");

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    /* add file 'performance' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
          S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_PERFORMANCE));
    else {
        entry->proc_fops = &acpi_processor_perf_fops;
        entry->proc_fops->write = acpi_processor_write_performance;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }
    return_VOID;
}
static void
acpi_cpufreq_remove_file (
    struct acpi_processor *pr)
{
    struct acpi_device      *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_cpufreq_removefile");

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    /* remove file 'performance' */
    remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
          acpi_device_dir(device));

    return_VOID;
}

#else
static void acpi_cpufreq_add_file (struct acpi_processor *pr) { return; }
static void acpi_cpufreq_remove_file (struct acpi_processor *pr) { return; }
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
int
acpi_processor_register_performance (
    struct acpi_processor_performance * performance,
    unsigned int cpu)
{
    struct acpi_processor *pr;

    ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

    if (!acpi_processor_ppc_is_init)
        return_VALUE(-EINVAL);

    down(&performance_sem);

    pr = processors[cpu];
    if (!pr) {
        up(&performance_sem);
        return_VALUE(-ENODEV);
    }

    if (pr->performance) {
        up(&performance_sem);
        return_VALUE(-EBUSY);
    }

    pr->performance = performance;

    if (acpi_processor_get_performance_info(pr)) {
        pr->performance = NULL;
        up(&performance_sem);
        return_VALUE(-EIO);
    }

    acpi_cpufreq_add_file(pr);

    up(&performance_sem);
    return_VALUE(0);
}
EXPORT_SYMBOL(acpi_processor_register_performance);
void
acpi_processor_unregister_performance (
    struct acpi_processor_performance * performance,
    unsigned int cpu)
{
    struct acpi_processor *pr;

    ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

    if (!acpi_processor_ppc_is_init)
        return_VOID;

    down(&performance_sem);

    pr = processors[cpu];
    if (!pr) {
        up(&performance_sem);
        return_VOID;
    }

    kfree(pr->performance->states);
    pr->performance = NULL;

    acpi_cpufreq_remove_file(pr);

    up(&performance_sem);

    return_VOID;
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
/* for the rest of it, check arch/i386/kernel/cpu/cpufreq/acpi.c */

#else /* !CONFIG_CPU_FREQ */

static void acpi_processor_ppc_init(void) { return; }
static void acpi_processor_ppc_exit(void) { return; }

static int acpi_processor_ppc_has_changed(struct acpi_processor *pr) {
    static unsigned int printout = 1;
    if (printout) {
        printk(KERN_WARNING "Warning: Processor Platform Limit event detected, but not handled.\n");
        printk(KERN_WARNING "Consider compiling CPUfreq support into your kernel.\n");
        printout = 0;
    }
    return 0;
}

#endif /* CONFIG_CPU_FREQ */
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */

static int
acpi_processor_get_throttling (
    struct acpi_processor   *pr)
{
    int         state = 0;
    u32         value = 0;
    u32         duty_mask = 0;
    u32         duty_value = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_throttling");

    if (!pr)
        return_VALUE(-EINVAL);

    if (!pr->flags.throttling)
        return_VALUE(-ENODEV);

    pr->throttling.state = 0;

    local_irq_disable();

    duty_mask = pr->throttling.state_count - 1;

    duty_mask <<= pr->throttling.duty_offset;

    value = inl(pr->throttling.address);

    /*
     * Compute the current throttling state when throttling is enabled
     * (bit 4 is on).
     */
    if (value & 0x10) {
        duty_value = value & duty_mask;
        duty_value >>= pr->throttling.duty_offset;

        if (duty_value)
            state = pr->throttling.state_count - duty_value;
    }

    pr->throttling.state = state;

    local_irq_enable();

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "Throttling state is T%d (%d%% throttling applied)\n",
        state, pr->throttling.states[state].performance));

    return_VALUE(0);
}
static int
acpi_processor_set_throttling (
    struct acpi_processor   *pr,
    int         state)
{
    u32         value = 0;
    u32         duty_mask = 0;
    u32         duty_value = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_set_throttling");

    if (!pr)
        return_VALUE(-EINVAL);

    if ((state < 0) || (state > (pr->throttling.state_count - 1)))
        return_VALUE(-EINVAL);

    if (!pr->flags.throttling)
        return_VALUE(-ENODEV);

    if (state == pr->throttling.state)
        return_VALUE(0);

    local_irq_disable();

    /*
     * Calculate the duty_value and duty_mask.
     */
    if (state) {
        duty_value = pr->throttling.state_count - state;

        duty_value <<= pr->throttling.duty_offset;

        /* Used to clear all duty_value bits */
        duty_mask = pr->throttling.state_count - 1;

        duty_mask <<= acpi_fadt.duty_offset;
        duty_mask = ~duty_mask;
    }
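
    /*
     * Worked example (illustrative, not in the original source): with
     * duty_width = 3 there are state_count = 8 throttling states.
     * Requesting T2 with duty_offset = 1 gives
     * duty_value = (8 - 2) << 1 = 0x0C and duty_mask = ~((8 - 1) << 1) =
     * ~0x0E, i.e. the mask clears the old duty field before the new value
     * is written below.
     */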
    /*
     * Disable throttling by writing a 0 to bit 4.  Note that we must
     * turn it off before we can change the duty_value.
     */
    value = inl(pr->throttling.address);
    if (value & 0x10) {
        value &= 0xFFFFFFEF;
        outl(value, pr->throttling.address);
    }

    /*
     * Write the new duty_value and then enable throttling.  Note
     * that a state value of 0 leaves throttling disabled.
     */
    if (state) {
        value |= duty_value;
        outl(value, pr->throttling.address);

        value |= 0x00000010;
        outl(value, pr->throttling.address);
    }

    pr->throttling.state = state;

    local_irq_enable();

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "Throttling state set to T%d (%d%%)\n", state,
        (pr->throttling.states[state].performance?pr->throttling.states[state].performance/10:0)));

    return_VALUE(0);
}
static int
acpi_processor_get_throttling_info (
    struct acpi_processor   *pr)
{
    int         result = 0;
    int         step = 0;
    int         i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info");

    if (!pr)
        return_VALUE(-EINVAL);

    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
        "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
        pr->throttling.address,
        pr->throttling.duty_offset,
        pr->throttling.duty_width));

    /* TBD: Support ACPI 2.0 objects */

    if (!pr->throttling.address) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
        return_VALUE(0);
    }
    else if (!pr->throttling.duty_width) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
        return_VALUE(0);
    }
    /* TBD: Support duty_cycle values that span bit 4. */
    else if ((pr->throttling.duty_offset
        + pr->throttling.duty_width) > 4) {
        ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n"));
        return_VALUE(0);
    }

    /*
     * PIIX4 Errata: We don't support throttling on the original PIIX4.
     * This shouldn't be an issue as few (if any) mobile systems ever
     * used this part.
     */
    if (errata.piix4.throttle) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Throttling not supported on PIIX4 A- or B-step\n"));
        return_VALUE(0);
    }

    pr->throttling.state_count = 1 << acpi_fadt.duty_width;

    /*
     * Compute state values.  Note that throttling displays a linear power/
     * performance relationship (at 50% performance the CPU will consume
     * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
     */

    step = (1000 / pr->throttling.state_count);

    for (i = 0; i < pr->throttling.state_count; i++) {
        pr->throttling.states[i].performance = step * i;
        pr->throttling.states[i].power = step * i;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
        pr->throttling.state_count));
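
    /*
     * Worked example (illustrative, not in the original source): with
     * acpi_fadt.duty_width = 3 there are 1 << 3 = 8 throttling states and
     * step = 1000 / 8 = 125, so states[4].performance = 4 * 125 = 500,
     * which the /proc interface later prints as 50% (performance / 10).
     */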
    pr->flags.throttling = 1;

    /*
     * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
     * thermal) decide to lower performance if it so chooses, but for now
     * we'll crank up the speed.
     */

    result = acpi_processor_get_throttling(pr);
    if (result)
        goto end;

    if (pr->throttling.state) {
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabling throttling (was T%d)\n",
            pr->throttling.state));
        result = acpi_processor_set_throttling(pr, 0);
        if (result)
            goto end;
    }

end:
    if (result)
        pr->flags.throttling = 0;

    return_VALUE(result);
}
/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */

static int
acpi_processor_apply_limit (
    struct acpi_processor   *pr)
{
    int         result = 0;
    u16         px = 0;
    u16         tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_apply_limit");

    if (!pr)
        return_VALUE(-EINVAL);

    if (!pr->flags.limit)
        return_VALUE(-ENODEV);

    if (pr->flags.throttling) {
        if (pr->limit.user.tx > tx)
            tx = pr->limit.user.tx;
        if (pr->limit.thermal.tx > tx)
            tx = pr->limit.thermal.tx;

        result = acpi_processor_set_throttling(pr, tx);
        if (result)
            goto end;
    }

    pr->limit.state.px = px;
    pr->limit.state.tx = tx;

    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d] limit set to (P%d:T%d)\n",
        pr->id,
        pr->limit.state.px,
        pr->limit.state.tx));

end:
    if (result)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n"));

    return_VALUE(result);
}
#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy.  Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;


static int cpu_has_cpufreq(unsigned int cpu)
{
    struct cpufreq_policy policy;
    if (!acpi_thermal_cpufreq_is_init)
        return 0;
    if (!cpufreq_get_policy(&policy, cpu))
        return 1;
    return 0;
}


static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
    if (!cpu_has_cpufreq(cpu))
        return -ENODEV;

    if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
        cpufreq_thermal_reduction_pctg[cpu] += 20;
        cpufreq_update_policy(cpu);
        return 0;
    }

    return -ERANGE;
}


static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
    if (!cpu_has_cpufreq(cpu))
        return -ENODEV;

    if (cpufreq_thermal_reduction_pctg[cpu] >= 20) {
        cpufreq_thermal_reduction_pctg[cpu] -= 20;
        cpufreq_update_policy(cpu);
        return 0;
    }

    return -ERANGE;
}


static int acpi_thermal_cpufreq_notifier(
    struct notifier_block *nb,
    unsigned long event,
    void *data)
{
    struct cpufreq_policy *policy = data;
    unsigned long max_freq = 0;

    if (event != CPUFREQ_ADJUST)
        return 0;

    max_freq = (policy->cpuinfo.max_freq * (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;

    cpufreq_verify_within_limits(policy, 0, max_freq);

    return 0;
}
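
/*
 * Example (illustrative, not in the original source): with a cumulative
 * thermal reduction of 40% on a CPU whose cpuinfo.max_freq is 2000000 kHz,
 * the notifier above clamps the policy to (2000000 * (100 - 40)) / 100 =
 * 1200000 kHz on every CPUFREQ_ADJUST event.
 */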
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
    .notifier_call = acpi_thermal_cpufreq_notifier,
};


static void acpi_thermal_cpufreq_init(void) {
    int i;

    for (i = 0; i < NR_CPUS; i++)
        cpufreq_thermal_reduction_pctg[i] = 0;

    i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);
    if (!i)
        acpi_thermal_cpufreq_is_init = 1;
}

static void acpi_thermal_cpufreq_exit(void) {
    if (acpi_thermal_cpufreq_is_init)
        cpufreq_unregister_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER);

    acpi_thermal_cpufreq_is_init = 0;
}

#else /* ! CONFIG_CPU_FREQ */

static void acpi_thermal_cpufreq_init(void) { return; }
static void acpi_thermal_cpufreq_exit(void) { return; }
static int acpi_thermal_cpufreq_increase(unsigned int cpu) { return -ENODEV; }
static int acpi_thermal_cpufreq_decrease(unsigned int cpu) { return -ENODEV; }

#endif /* CONFIG_CPU_FREQ */
int
acpi_processor_set_thermal_limit (
    acpi_handle     handle,
    int             type)
{
    int             result = 0;
    struct acpi_processor   *pr = NULL;
    struct acpi_device      *device = NULL;
    int             tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit");

    if ((type < ACPI_PROCESSOR_LIMIT_NONE)
        || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
        return_VALUE(-EINVAL);

    result = acpi_bus_get_device(handle, &device);
    if (result)
        return_VALUE(result);

    pr = (struct acpi_processor *) acpi_driver_data(device);
    if (!pr)
        return_VALUE(-ENODEV);

    /* Thermal limits are always relative to the current Px/Tx state. */
    if (pr->flags.throttling)
        pr->limit.thermal.tx = pr->throttling.state;

    /*
     * Our default policy is to only use throttling at the lowest
     * performance state.
     */

    tx = pr->limit.thermal.tx;

    switch (type) {

    case ACPI_PROCESSOR_LIMIT_NONE:
        do {
            result = acpi_thermal_cpufreq_decrease(pr->id);
        } while (!result);
        tx = 0;
        break;

    case ACPI_PROCESSOR_LIMIT_INCREMENT:
        /* if going up: P-states first, T-states later */

        result = acpi_thermal_cpufreq_increase(pr->id);
        if (!result)
            goto end;
        else if (result == -ERANGE)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "At maximum performance state\n"));

        if (pr->flags.throttling) {
            if (tx == (pr->throttling.state_count - 1))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "At maximum throttling state\n"));
            else
                tx++;
        }
        break;

    case ACPI_PROCESSOR_LIMIT_DECREMENT:
        /* if going down: T-states first, P-states later */

        if (pr->flags.throttling) {
            if (tx == 0)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "At minimum throttling state\n"));
            else {
                tx--;
                goto end;
            }
        }

        result = acpi_thermal_cpufreq_decrease(pr->id);
        if (result == -ERANGE)
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "At minimum performance state\n"));

        break;
    }

end:
    if (pr->flags.throttling) {
        pr->limit.thermal.px = 0;
        pr->limit.thermal.tx = tx;

        result = acpi_processor_apply_limit(pr);
        if (result)
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                "Unable to set thermal limit\n"));

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
            pr->limit.thermal.px,
            pr->limit.thermal.tx));
    }

    return_VALUE(result);
}
static int
acpi_processor_get_limit_info (
    struct acpi_processor   *pr)
{
    ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info");

    if (!pr)
        return_VALUE(-EINVAL);

    if (pr->flags.throttling)
        pr->flags.limit = 1;

    return_VALUE(0);
}
/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

struct proc_dir_entry       *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor   *pr = (struct acpi_processor *)seq->private;

    ACPI_FUNCTION_TRACE("acpi_processor_info_seq_show");

    if (!pr)
        goto end;

    seq_printf(seq, "processor id:            %d\n"
            "acpi id:                 %d\n"
            "bus mastering control:   %s\n"
            "power management:        %s\n"
            "throttling control:      %s\n"
            "limit interface:         %s\n",
            pr->id,
            pr->acpi_id,
            pr->flags.bm_control ? "yes" : "no",
            pr->flags.power ? "yes" : "no",
            pr->flags.throttling ? "yes" : "no",
            pr->flags.limit ? "yes" : "no");

end:
    return_VALUE(0);
}
1847 return single_open(file
, acpi_processor_info_seq_show
,
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor   *pr = (struct acpi_processor *)seq->private;
    int         i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

    if (!pr)
        goto end;

    seq_printf(seq, "active state:            C%d\n"
            "default state:           C%d\n"
            "bus master activity:     %08x\n",
            pr->power.state,
            pr->power.default_state,
            pr->power.bm_activity);

    seq_puts(seq, "states:\n");

    for (i = 1; i < ACPI_C_STATE_COUNT; i++) {

        seq_printf(seq, "   %cC%d:                  ",
            (i == pr->power.state?'*':' '), i);

        if (!pr->power.states[i].valid) {
            seq_puts(seq, "<not supported>\n");
            continue;
        }

        if (pr->power.states[i].promotion.state)
            seq_printf(seq, "promotion[C%d] ",
                pr->power.states[i].promotion.state);
        else
            seq_puts(seq, "promotion[--] ");

        if (pr->power.states[i].demotion.state)
            seq_printf(seq, "demotion[C%d] ",
                pr->power.states[i].demotion.state);
        else
            seq_puts(seq, "demotion[--] ");

        seq_printf(seq, "latency[%03d] usage[%08d]\n",
            pr->power.states[i].latency,
            pr->power.states[i].usage);
    }

end:
    return_VALUE(0);
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_power_seq_show,
                        PDE(inode)->data);
}
static int acpi_processor_throttling_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor   *pr = (struct acpi_processor *)seq->private;
    int         i = 0;
    int         result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_throttling_seq_show");

    if (!pr)
        goto end;

    if (!(pr->throttling.state_count > 0)) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    result = acpi_processor_get_throttling(pr);

    if (result) {
        seq_puts(seq, "Could not determine current throttling state.\n");
        goto end;
    }

    seq_printf(seq, "state count:             %d\n"
            "active state:            T%d\n",
            pr->throttling.state_count,
            pr->throttling.state);

    seq_puts(seq, "states:\n");
    for (i = 0; i < pr->throttling.state_count; i++)
        seq_printf(seq, "   %cT%d:                  %02d%%\n",
            (i == pr->throttling.state?'*':' '), i,
            (pr->throttling.states[i].performance?pr->throttling.states[i].performance/10:0));

end:
    return_VALUE(0);
}
static int acpi_processor_throttling_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_throttling_seq_show,
                        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_throttling (
    struct file         *file,
    const char __user   *buffer,
    size_t              count,
    loff_t              *data)
{
    int                 result = 0;
    struct seq_file     *m = (struct seq_file *)file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *)m->private;
    char                state_string[12] = {'\0'};

    ACPI_FUNCTION_TRACE("acpi_processor_write_throttling");

    if (!pr || (count > sizeof(state_string) - 1))
        return_VALUE(-EINVAL);

    if (copy_from_user(state_string, buffer, count))
        return_VALUE(-EFAULT);

    state_string[count] = '\0';

    result = acpi_processor_set_throttling(pr,
        simple_strtoul(state_string, NULL, 0));
    if (result)
        return_VALUE(result);

    return_VALUE(count);
}
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
    struct acpi_processor   *pr = (struct acpi_processor *)seq->private;

    ACPI_FUNCTION_TRACE("acpi_processor_limit_seq_show");

    if (!pr)
        goto end;

    if (!pr->flags.limit) {
        seq_puts(seq, "<not supported>\n");
        goto end;
    }

    seq_printf(seq, "active limit:            P%d:T%d\n"
            "user limit:              P%d:T%d\n"
            "thermal limit:           P%d:T%d\n",
            pr->limit.state.px, pr->limit.state.tx,
            pr->limit.user.px, pr->limit.user.tx,
            pr->limit.thermal.px, pr->limit.thermal.tx);

end:
    return_VALUE(0);
}
static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
    return single_open(file, acpi_processor_limit_seq_show,
                        PDE(inode)->data);
}
static ssize_t
acpi_processor_write_limit (
    struct file         *file,
    const char __user   *buffer,
    size_t              count,
    loff_t              *data)
{
    int                 result = 0;
    struct seq_file     *m = (struct seq_file *)file->private_data;
    struct acpi_processor *pr = (struct acpi_processor *)m->private;
    char                limit_string[25] = {'\0'};
    int                 px = 0;
    int                 tx = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_write_limit");

    if (!pr || (count > sizeof(limit_string) - 1)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
        return_VALUE(-EINVAL);
    }

    if (copy_from_user(limit_string, buffer, count)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n"));
        return_VALUE(-EFAULT);
    }

    limit_string[count] = '\0';

    if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n"));
        return_VALUE(-EINVAL);
    }

    if (pr->flags.throttling) {
        if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
            ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n"));
            return_VALUE(-EINVAL);
        }
        pr->limit.user.tx = tx;
    }

    result = acpi_processor_apply_limit(pr);

    return_VALUE(count);
}
static int
acpi_processor_add_fs (
    struct acpi_device  *device)
{
    struct proc_dir_entry   *entry = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_add_fs");

    if (!acpi_device_dir(device)) {
        acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
            acpi_processor_dir);
        if (!acpi_device_dir(device))
            return_VALUE(-ENODEV);
    }
    acpi_device_dir(device)->owner = THIS_MODULE;

    /* 'info' [R] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
        S_IRUGO, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_INFO));
    else {
        entry->proc_fops = &acpi_processor_info_fops;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'power' [R] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
        S_IRUGO, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_POWER));
    else {
        entry->proc_fops = &acpi_processor_power_fops;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'throttling' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
        S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_THROTTLING));
    else {
        entry->proc_fops = &acpi_processor_throttling_fops;
        entry->proc_fops->write = acpi_processor_write_throttling;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    /* 'limit' [R/W] */
    entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
        S_IFREG|S_IRUGO|S_IWUSR, acpi_device_dir(device));
    if (!entry)
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Unable to create '%s' fs entry\n",
            ACPI_PROCESSOR_FILE_LIMIT));
    else {
        entry->proc_fops = &acpi_processor_limit_fops;
        entry->proc_fops->write = acpi_processor_write_limit;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;
    }

    return_VALUE(0);
}
static int
acpi_processor_remove_fs (
    struct acpi_device  *device)
{
    ACPI_FUNCTION_TRACE("acpi_processor_remove_fs");

    if (acpi_device_dir(device)) {
        remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
            acpi_device_dir(device));
        remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, acpi_device_dir(device));
        remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
        acpi_device_dir(device) = NULL;
    }

    return_VALUE(0);
}
/* Use the acpiid in MADT to map cpus in case of SMP */
#ifndef CONFIG_SMP
#define convert_acpiid_to_cpu(acpi_id) (0xff)
#else

#ifdef CONFIG_IA64
#define arch_acpiid_to_apicid   ia64_acpiid_to_sapicid
#define arch_cpu_to_apicid      ia64_cpu_to_sapicid
#define ARCH_BAD_APICID         (0xffff)
#else
#define arch_acpiid_to_apicid   x86_acpiid_to_apicid
#define arch_cpu_to_apicid      x86_cpu_to_apicid
#define ARCH_BAD_APICID         (0xff)
#endif
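
/*
 * arch_acpiid_to_apicid[] maps an ACPI processor ID from the MADT to a
 * (S)APIC ID, while arch_cpu_to_apicid[] maps logical CPU numbers to
 * (S)APIC IDs; the helper below therefore resolves an ACPI ID to a logical
 * CPU by searching for a matching APIC ID.
 */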
static u8 convert_acpiid_to_cpu(u8 acpi_id)
{
    u16 apic_id;
    int i;

    apic_id = arch_acpiid_to_apicid[acpi_id];
    if (apic_id == ARCH_BAD_APICID)
        return -1;

    for (i = 0; i < NR_CPUS; i++) {
        if (arch_cpu_to_apicid[i] == apic_id)
            return i;
    }
    return -1;
}
#endif
2185 -------------------------------------------------------------------------- */
2188 acpi_processor_get_info (
2189 struct acpi_processor
*pr
)
2191 acpi_status status
= 0;
2192 union acpi_object object
= {0};
2193 struct acpi_buffer buffer
= {sizeof(union acpi_object
), &object
};
2195 static int cpu0_initialized
;
2197 ACPI_FUNCTION_TRACE("acpi_processor_get_info");
2200 return_VALUE(-EINVAL
);
2202 if (num_online_cpus() > 1)
2205 acpi_processor_errata(pr
);
2208 * Check to see if we have bus mastering arbitration control. This
2209 * is required for proper C3 usage (to maintain cache coherency).
2211 if (acpi_fadt
.V1_pm2_cnt_blk
&& acpi_fadt
.pm2_cnt_len
) {
2212 pr
->flags
.bm_control
= 1;
2213 ACPI_DEBUG_PRINT((ACPI_DB_INFO
,
2214 "Bus mastering arbitration control present\n"));
2217 ACPI_DEBUG_PRINT((ACPI_DB_INFO
,
2218 "No bus mastering arbitration control\n"));
2221 * Evalute the processor object. Note that it is common on SMP to
2222 * have the first (boot) processor with a valid PBLK address while
2223 * all others have a NULL address.
2225 status
= acpi_evaluate_object(pr
->handle
, NULL
, NULL
, &buffer
);
2226 if (ACPI_FAILURE(status
)) {
2227 ACPI_DEBUG_PRINT((ACPI_DB_ERROR
,
2228 "Error evaluating processor object\n"));
2229 return_VALUE(-ENODEV
);
2233 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
2234 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
2236 pr
->acpi_id
= object
.processor
.proc_id
;
2238 cpu_index
= convert_acpiid_to_cpu(pr
->acpi_id
);
2240 if ( !cpu0_initialized
&& (cpu_index
== 0xff)) {
2241 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
2243 } else if (cpu_index
> num_online_cpus()) {
2245 * Extra Processor objects may be enumerated on MP systems with
2246 * less than the max # of CPUs. They should be ignored.
2248 ACPI_DEBUG_PRINT((ACPI_DB_ERROR
,
2249 "Error getting cpuindex for acpiid 0x%x\n",
2251 return_VALUE(-ENODEV
);
2253 cpu0_initialized
= 1;
2257 ACPI_DEBUG_PRINT((ACPI_DB_INFO
, "Processor [%d:%d]\n", pr
->id
,
2260 if (!object
.processor
.pblk_address
)
2261 ACPI_DEBUG_PRINT((ACPI_DB_INFO
, "No PBLK (NULL address)\n"));
2262 else if (object
.processor
.pblk_length
!= 6)
2263 ACPI_DEBUG_PRINT((ACPI_DB_ERROR
, "Invalid PBLK length [%d]\n",
2264 object
.processor
.pblk_length
));
2266 pr
->throttling
.address
= object
.processor
.pblk_address
;
2267 pr
->throttling
.duty_offset
= acpi_fadt
.duty_offset
;
2268 pr
->throttling
.duty_width
= acpi_fadt
.duty_width
;
2269 pr
->power
.states
[ACPI_STATE_C2
].address
=
2270 object
.processor
.pblk_address
+ 4;
2271 pr
->power
.states
[ACPI_STATE_C3
].address
=
2272 object
.processor
.pblk_address
+ 5;
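
        /*
         * PBLK layout (per the ACPI specification): the 6-byte processor
         * block starts with the 4-byte P_CNT throttling control register,
         * followed by the 1-byte P_LVL2 and P_LVL3 registers -- hence
         * throttling.address = pblk_address and the C2/C3 entry addresses
         * at pblk_address + 4 and + 5 above.
         */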
        /*
         * We don't care about error returns - we just try to mark
         * these reserved so that nobody else is confused into thinking
         * that this region might be unused..
         *
         * (In particular, allocating the IO range for Cardbus)
         */
        request_region(pr->throttling.address, 6, "ACPI CPU throttle");
    }

    acpi_processor_get_power_info(pr);
#ifdef CONFIG_CPU_FREQ
    acpi_processor_ppc_has_changed(pr);
#endif
    acpi_processor_get_throttling_info(pr);
    acpi_processor_get_limit_info(pr);

    return_VALUE(0);
}
static void
acpi_processor_notify (
    acpi_handle     handle,
    u32             event,
    void            *data)
{
    struct acpi_processor   *pr = (struct acpi_processor *) data;
    struct acpi_device      *device = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_notify");

    if (!pr)
        return_VOID;

    if (acpi_bus_get_device(pr->handle, &device))
        return_VOID;

    switch (event) {
    case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
        acpi_processor_ppc_has_changed(pr);
        acpi_bus_generate_event(device, event,
            pr->performance_platform_limit);
        break;
    case ACPI_PROCESSOR_NOTIFY_POWER:
        /* TBD */
        acpi_bus_generate_event(device, event, 0);
        break;
    default:
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
            "Unsupported event [0x%x]\n", event));
        break;
    }

    return_VOID;
}
static int
acpi_processor_add (
    struct acpi_device  *device)
{
    int                 result = 0;
    acpi_status         status = AE_OK;
    struct acpi_processor   *pr = NULL;
    int                 i = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_add");

    if (!device)
        return_VALUE(-EINVAL);

    pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL);
    if (!pr)
        return_VALUE(-ENOMEM);
    memset(pr, 0, sizeof(struct acpi_processor));

    pr->handle = device->handle;
    strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
    strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
    acpi_driver_data(device) = pr;

    result = acpi_processor_get_info(pr);
    if (result)
        goto end;

    result = acpi_processor_add_fs(device);
    if (result)
        goto end;

    status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
        acpi_processor_notify, pr);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error installing notify handler\n"));
        result = -ENODEV;
        goto end;
    }

    processors[pr->id] = pr;

    /*
     * Install the idle handler if processor power management is supported.
     * Note that the default idle handler (default_idle) will be used on
     * platforms that only support C1.
     */
    if ((pr->id == 0) && (pr->flags.power)) {
        pm_idle_save = pm_idle;
        pm_idle = acpi_processor_idle;
    }

    printk(KERN_INFO PREFIX "%s [%s] (supports",
        acpi_device_name(device), acpi_device_bid(device));
    for (i = 1; i < ACPI_C_STATE_COUNT; i++)
        if (pr->power.states[i].valid)
            printk(" C%d", i);
    if (pr->flags.throttling)
        printk(", %d throttling states", pr->throttling.state_count);
    printk(")\n");

end:
    if (result) {
        acpi_processor_remove_fs(device);
        kfree(pr);
    }

    return_VALUE(result);
}
static int
acpi_processor_remove (
    struct acpi_device  *device,
    int                 type)
{
    acpi_status         status = AE_OK;
    struct acpi_processor   *pr = NULL;

    ACPI_FUNCTION_TRACE("acpi_processor_remove");

    if (!device || !acpi_driver_data(device))
        return_VALUE(-EINVAL);

    pr = (struct acpi_processor *) acpi_driver_data(device);

    /* Unregister the idle handler when processor #0 is removed. */
    if (pr->id == 0) {
        pm_idle = pm_idle_save;
        /*
         * We are about to unload the current idle thread pm callback
         * (pm_idle).  Wait for all processors to update cached/local
         * copies of pm_idle before proceeding.
         */
        synchronize_kernel();
    }

    status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
        acpi_processor_notify);
    if (ACPI_FAILURE(status)) {
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
            "Error removing notify handler\n"));
    }

    acpi_processor_remove_fs(device);

    processors[pr->id] = NULL;

    kfree(pr);

    return_VALUE(0);
}
static int __init
acpi_processor_init (void)
{
    int         result = 0;

    ACPI_FUNCTION_TRACE("acpi_processor_init");

    memset(&processors, 0, sizeof(processors));
    memset(&errata, 0, sizeof(errata));

    acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
    if (!acpi_processor_dir)
        return_VALUE(-ENODEV);
    acpi_processor_dir->owner = THIS_MODULE;

    result = acpi_bus_register_driver(&acpi_processor_driver);
    if (result < 0) {
        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        return_VALUE(-ENODEV);
    }

    acpi_thermal_cpufreq_init();

    acpi_processor_ppc_init();

    return_VALUE(0);
}
static void __exit
acpi_processor_exit (void)
{
    ACPI_FUNCTION_TRACE("acpi_processor_exit");

    acpi_processor_ppc_exit();

    acpi_thermal_cpufreq_exit();

    acpi_bus_unregister_driver(&acpi_processor_driver);

    remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

    return_VOID;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);